1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #define SWSMU_CODE_LAYER_L4 24 25 #include "amdgpu.h" 26 #include "amdgpu_smu.h" 27 #include "smu_cmn.h" 28 #include "soc15_common.h" 29 30 /* 31 * DO NOT use these for err/warn/info/debug messages. 32 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 33 * They are more MGPU friendly. 34 */ 35 #undef pr_err 36 #undef pr_warn 37 #undef pr_info 38 #undef pr_debug 39 40 #define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL 41 42 #undef __SMU_DUMMY_MAP 43 #define __SMU_DUMMY_MAP(type) #type 44 static const char * const __smu_message_names[] = { 45 SMU_MESSAGE_TYPES 46 }; 47 48 #define smu_cmn_call_asic_func(intf, smu, args...) \ 49 ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \ 50 (smu)->ppt_funcs->intf(smu, ##args) : \ 51 -ENOTSUPP) : \ 52 -EINVAL) 53 54 static const char *smu_get_message_name(struct smu_context *smu, 55 enum smu_message_type type) 56 { 57 if (type < 0 || type >= SMU_MSG_MAX_COUNT) 58 return "unknown smu message"; 59 60 return __smu_message_names[type]; 61 } 62 63 static void smu_cmn_read_arg(struct smu_context *smu, 64 uint32_t *arg) 65 { 66 struct amdgpu_device *adev = smu->adev; 67 68 *arg = RREG32(smu->param_reg); 69 } 70 71 /* Redefine the SMU error codes here. 72 * 73 * Note that these definitions are redundant and should be removed 74 * when the SMU has exported a unified header file containing these 75 * macros, which header file we can just include and use the SMU's 76 * macros. At the moment, these error codes are defined by the SMU 77 * per-ASIC unfortunately, yet we're a one driver for all ASICs. 78 */ 79 #define SMU_RESP_NONE 0 80 #define SMU_RESP_OK 1 81 #define SMU_RESP_CMD_FAIL 0xFF 82 #define SMU_RESP_CMD_UNKNOWN 0xFE 83 #define SMU_RESP_CMD_BAD_PREREQ 0xFD 84 #define SMU_RESP_BUSY_OTHER 0xFC 85 #define SMU_RESP_DEBUG_END 0xFB 86 87 /** 88 * __smu_cmn_poll_stat -- poll for a status from the SMU 89 * @smu: a pointer to SMU context 90 * 91 * Returns the status of the SMU, which could be, 92 * 0, the SMU is busy with your command; 93 * 1, execution status: success, execution result: success; 94 * 0xFF, execution status: success, execution result: failure; 95 * 0xFE, unknown command; 96 * 0xFD, valid command, but bad (command) prerequisites; 97 * 0xFC, the command was rejected as the SMU is busy; 98 * 0xFB, "SMC_Result_DebugDataDumpEnd". 99 * 100 * The values here are not defined by macros, because I'd rather we 101 * include a single header file which defines them, which is 102 * maintained by the SMU FW team, so that we're impervious to firmware 103 * changes. At the moment those values are defined in various header 104 * files, one for each ASIC, yet here we're a single ASIC-agnostic 105 * interface. Such a change can be followed-up by a subsequent patch. 106 */ 107 static u32 __smu_cmn_poll_stat(struct smu_context *smu) 108 { 109 struct amdgpu_device *adev = smu->adev; 110 int timeout = adev->usec_timeout * 20; 111 u32 reg; 112 113 for ( ; timeout > 0; timeout--) { 114 reg = RREG32(smu->resp_reg); 115 if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0) 116 break; 117 118 udelay(1); 119 } 120 121 return reg; 122 } 123 124 static void __smu_cmn_reg_print_error(struct smu_context *smu, 125 u32 reg_c2pmsg_90, 126 int msg_index, 127 u32 param, 128 enum smu_message_type msg) 129 { 130 struct amdgpu_device *adev = smu->adev; 131 const char *message = smu_get_message_name(smu, msg); 132 u32 msg_idx, prm; 133 134 switch (reg_c2pmsg_90) { 135 case SMU_RESP_NONE: { 136 msg_idx = RREG32(smu->msg_reg); 137 prm = RREG32(smu->param_reg); 138 dev_err_ratelimited(adev->dev, 139 "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", 140 msg_idx, prm); 141 } 142 break; 143 case SMU_RESP_OK: 144 /* The SMU executed the command. It completed with a 145 * successful result. 146 */ 147 break; 148 case SMU_RESP_CMD_FAIL: 149 /* The SMU executed the command. It completed with an 150 * unsuccessful result. 151 */ 152 break; 153 case SMU_RESP_CMD_UNKNOWN: 154 dev_err_ratelimited(adev->dev, 155 "SMU: unknown command: index:%d param:0x%08X message:%s", 156 msg_index, param, message); 157 break; 158 case SMU_RESP_CMD_BAD_PREREQ: 159 dev_err_ratelimited(adev->dev, 160 "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s", 161 msg_index, param, message); 162 break; 163 case SMU_RESP_BUSY_OTHER: 164 dev_err_ratelimited(adev->dev, 165 "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", 166 msg_index, param, message); 167 break; 168 case SMU_RESP_DEBUG_END: 169 dev_err_ratelimited(adev->dev, 170 "SMU: I'm debugging!"); 171 break; 172 default: 173 dev_err_ratelimited(adev->dev, 174 "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", 175 reg_c2pmsg_90, msg_index, param, message); 176 break; 177 } 178 } 179 180 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90) 181 { 182 int res; 183 184 switch (reg_c2pmsg_90) { 185 case SMU_RESP_NONE: 186 /* The SMU is busy--still executing your command. 187 */ 188 res = -ETIME; 189 break; 190 case SMU_RESP_OK: 191 res = 0; 192 break; 193 case SMU_RESP_CMD_FAIL: 194 /* Command completed successfully, but the command 195 * status was failure. 196 */ 197 res = -EIO; 198 break; 199 case SMU_RESP_CMD_UNKNOWN: 200 /* Unknown command--ignored by the SMU. 201 */ 202 res = -EOPNOTSUPP; 203 break; 204 case SMU_RESP_CMD_BAD_PREREQ: 205 /* Valid command--bad prerequisites. 206 */ 207 res = -EINVAL; 208 break; 209 case SMU_RESP_BUSY_OTHER: 210 /* The SMU is busy with other commands. The client 211 * should retry in 10 us. 212 */ 213 res = -EBUSY; 214 break; 215 default: 216 /* Unknown or debug response from the SMU. 217 */ 218 res = -EREMOTEIO; 219 break; 220 } 221 222 return res; 223 } 224 225 static void __smu_cmn_send_msg(struct smu_context *smu, 226 u16 msg, 227 u32 param) 228 { 229 struct amdgpu_device *adev = smu->adev; 230 231 WREG32(smu->resp_reg, 0); 232 WREG32(smu->param_reg, param); 233 WREG32(smu->msg_reg, msg); 234 } 235 236 /** 237 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status 238 * @smu: pointer to an SMU context 239 * @msg_index: message index 240 * @param: message parameter to send to the SMU 241 * 242 * Send a message to the SMU with the parameter passed. Do not wait 243 * for status/result of the message, thus the "without_waiting". 244 * 245 * Return 0 on success, -errno on error if we weren't able to _send_ 246 * the message for some reason. See __smu_cmn_reg2errno() for details 247 * of the -errno. 248 */ 249 int smu_cmn_send_msg_without_waiting(struct smu_context *smu, 250 uint16_t msg_index, 251 uint32_t param) 252 { 253 struct amdgpu_device *adev = smu->adev; 254 u32 reg; 255 int res; 256 257 if (adev->no_hw_access) 258 return 0; 259 260 reg = __smu_cmn_poll_stat(smu); 261 res = __smu_cmn_reg2errno(smu, reg); 262 if (reg == SMU_RESP_NONE || 263 res == -EREMOTEIO) 264 goto Out; 265 __smu_cmn_send_msg(smu, msg_index, param); 266 res = 0; 267 Out: 268 if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && 269 res && (res != -ETIME)) { 270 amdgpu_device_halt(adev); 271 WARN_ON(1); 272 } 273 274 return res; 275 } 276 277 /** 278 * smu_cmn_wait_for_response -- wait for response from the SMU 279 * @smu: pointer to an SMU context 280 * 281 * Wait for status from the SMU. 282 * 283 * Return 0 on success, -errno on error, indicating the execution 284 * status and result of the message being waited for. See 285 * __smu_cmn_reg2errno() for details of the -errno. 286 */ 287 int smu_cmn_wait_for_response(struct smu_context *smu) 288 { 289 u32 reg; 290 int res; 291 292 reg = __smu_cmn_poll_stat(smu); 293 res = __smu_cmn_reg2errno(smu, reg); 294 295 if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && 296 res && (res != -ETIME)) { 297 amdgpu_device_halt(smu->adev); 298 WARN_ON(1); 299 } 300 301 return res; 302 } 303 304 /** 305 * smu_cmn_send_smc_msg_with_param -- send a message with parameter 306 * @smu: pointer to an SMU context 307 * @msg: message to send 308 * @param: parameter to send to the SMU 309 * @read_arg: pointer to u32 to return a value from the SMU back 310 * to the caller 311 * 312 * Send the message @msg with parameter @param to the SMU, wait for 313 * completion of the command, and return back a value from the SMU in 314 * @read_arg pointer. 315 * 316 * Return 0 on success, -errno when a problem is encountered sending 317 * message or receiving reply. If there is a PCI bus recovery or 318 * the destination is a virtual GPU which does not allow this message 319 * type, the message is simply dropped and success is also returned. 320 * See __smu_cmn_reg2errno() for details of the -errno. 321 * 322 * If we weren't able to send the message to the SMU, we also print 323 * the error to the standard log. 324 * 325 * Command completion status is printed only if the -errno is 326 * -EREMOTEIO, indicating that the SMU returned back an 327 * undefined/unknown/unspecified result. All other cases are 328 * well-defined, not printed, but instead given back to the client to 329 * decide what further to do. 330 * 331 * The return value, @read_arg is read back regardless, to give back 332 * more information to the client, which on error would most likely be 333 * @param, but we can't assume that. This also eliminates more 334 * conditionals. 335 */ 336 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, 337 enum smu_message_type msg, 338 uint32_t param, 339 uint32_t *read_arg) 340 { 341 struct amdgpu_device *adev = smu->adev; 342 int res, index; 343 u32 reg; 344 345 if (adev->no_hw_access) 346 return 0; 347 348 index = smu_cmn_to_asic_specific_index(smu, 349 CMN2ASIC_MAPPING_MSG, 350 msg); 351 if (index < 0) 352 return index == -EACCES ? 0 : index; 353 354 mutex_lock(&smu->message_lock); 355 reg = __smu_cmn_poll_stat(smu); 356 res = __smu_cmn_reg2errno(smu, reg); 357 if (reg == SMU_RESP_NONE || 358 res == -EREMOTEIO) { 359 __smu_cmn_reg_print_error(smu, reg, index, param, msg); 360 goto Out; 361 } 362 __smu_cmn_send_msg(smu, (uint16_t) index, param); 363 reg = __smu_cmn_poll_stat(smu); 364 res = __smu_cmn_reg2errno(smu, reg); 365 if (res != 0) 366 __smu_cmn_reg_print_error(smu, reg, index, param, msg); 367 if (read_arg) 368 smu_cmn_read_arg(smu, read_arg); 369 Out: 370 if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) { 371 amdgpu_device_halt(adev); 372 WARN_ON(1); 373 } 374 375 mutex_unlock(&smu->message_lock); 376 return res; 377 } 378 379 int smu_cmn_send_smc_msg(struct smu_context *smu, 380 enum smu_message_type msg, 381 uint32_t *read_arg) 382 { 383 return smu_cmn_send_smc_msg_with_param(smu, 384 msg, 385 0, 386 read_arg); 387 } 388 389 int smu_cmn_to_asic_specific_index(struct smu_context *smu, 390 enum smu_cmn2asic_mapping_type type, 391 uint32_t index) 392 { 393 struct cmn2asic_msg_mapping msg_mapping; 394 struct cmn2asic_mapping mapping; 395 396 switch (type) { 397 case CMN2ASIC_MAPPING_MSG: 398 if (index >= SMU_MSG_MAX_COUNT || 399 !smu->message_map) 400 return -EINVAL; 401 402 msg_mapping = smu->message_map[index]; 403 if (!msg_mapping.valid_mapping) 404 return -EINVAL; 405 406 if (amdgpu_sriov_vf(smu->adev) && 407 !msg_mapping.valid_in_vf) 408 return -EACCES; 409 410 return msg_mapping.map_to; 411 412 case CMN2ASIC_MAPPING_CLK: 413 if (index >= SMU_CLK_COUNT || 414 !smu->clock_map) 415 return -EINVAL; 416 417 mapping = smu->clock_map[index]; 418 if (!mapping.valid_mapping) 419 return -EINVAL; 420 421 return mapping.map_to; 422 423 case CMN2ASIC_MAPPING_FEATURE: 424 if (index >= SMU_FEATURE_COUNT || 425 !smu->feature_map) 426 return -EINVAL; 427 428 mapping = smu->feature_map[index]; 429 if (!mapping.valid_mapping) 430 return -EINVAL; 431 432 return mapping.map_to; 433 434 case CMN2ASIC_MAPPING_TABLE: 435 if (index >= SMU_TABLE_COUNT || 436 !smu->table_map) 437 return -EINVAL; 438 439 mapping = smu->table_map[index]; 440 if (!mapping.valid_mapping) 441 return -EINVAL; 442 443 return mapping.map_to; 444 445 case CMN2ASIC_MAPPING_PWR: 446 if (index >= SMU_POWER_SOURCE_COUNT || 447 !smu->pwr_src_map) 448 return -EINVAL; 449 450 mapping = smu->pwr_src_map[index]; 451 if (!mapping.valid_mapping) 452 return -EINVAL; 453 454 return mapping.map_to; 455 456 case CMN2ASIC_MAPPING_WORKLOAD: 457 if (index > PP_SMC_POWER_PROFILE_WINDOW3D || 458 !smu->workload_map) 459 return -EINVAL; 460 461 mapping = smu->workload_map[index]; 462 if (!mapping.valid_mapping) 463 return -EINVAL; 464 465 return mapping.map_to; 466 467 default: 468 return -EINVAL; 469 } 470 } 471 472 int smu_cmn_feature_is_supported(struct smu_context *smu, 473 enum smu_feature_mask mask) 474 { 475 struct smu_feature *feature = &smu->smu_feature; 476 int feature_id; 477 478 feature_id = smu_cmn_to_asic_specific_index(smu, 479 CMN2ASIC_MAPPING_FEATURE, 480 mask); 481 if (feature_id < 0) 482 return 0; 483 484 WARN_ON(feature_id > feature->feature_num); 485 486 return test_bit(feature_id, feature->supported); 487 } 488 489 static int __smu_get_enabled_features(struct smu_context *smu, 490 uint64_t *enabled_features) 491 { 492 return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features); 493 } 494 495 int smu_cmn_feature_is_enabled(struct smu_context *smu, 496 enum smu_feature_mask mask) 497 { 498 struct amdgpu_device *adev = smu->adev; 499 uint64_t enabled_features; 500 int feature_id; 501 502 if (__smu_get_enabled_features(smu, &enabled_features)) { 503 dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n"); 504 return 0; 505 } 506 507 /* 508 * For Renoir and Cyan Skillfish, they are assumed to have all features 509 * enabled. Also considering they have no feature_map available, the 510 * check here can avoid unwanted feature_map check below. 511 */ 512 if (enabled_features == ULLONG_MAX) 513 return 1; 514 515 feature_id = smu_cmn_to_asic_specific_index(smu, 516 CMN2ASIC_MAPPING_FEATURE, 517 mask); 518 if (feature_id < 0) 519 return 0; 520 521 return test_bit(feature_id, (unsigned long *)&enabled_features); 522 } 523 524 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, 525 enum smu_clk_type clk_type) 526 { 527 enum smu_feature_mask feature_id = 0; 528 529 switch (clk_type) { 530 case SMU_MCLK: 531 case SMU_UCLK: 532 feature_id = SMU_FEATURE_DPM_UCLK_BIT; 533 break; 534 case SMU_GFXCLK: 535 case SMU_SCLK: 536 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 537 break; 538 case SMU_SOCCLK: 539 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 540 break; 541 case SMU_VCLK: 542 case SMU_VCLK1: 543 feature_id = SMU_FEATURE_DPM_VCLK_BIT; 544 break; 545 case SMU_DCLK: 546 case SMU_DCLK1: 547 feature_id = SMU_FEATURE_DPM_DCLK_BIT; 548 break; 549 case SMU_FCLK: 550 feature_id = SMU_FEATURE_DPM_FCLK_BIT; 551 break; 552 default: 553 return true; 554 } 555 556 if (!smu_cmn_feature_is_enabled(smu, feature_id)) 557 return false; 558 559 return true; 560 } 561 562 int smu_cmn_get_enabled_mask(struct smu_context *smu, 563 uint64_t *feature_mask) 564 { 565 uint32_t *feature_mask_high; 566 uint32_t *feature_mask_low; 567 int ret = 0, index = 0; 568 569 if (!feature_mask) 570 return -EINVAL; 571 572 feature_mask_low = &((uint32_t *)feature_mask)[0]; 573 feature_mask_high = &((uint32_t *)feature_mask)[1]; 574 575 index = smu_cmn_to_asic_specific_index(smu, 576 CMN2ASIC_MAPPING_MSG, 577 SMU_MSG_GetEnabledSmuFeatures); 578 if (index > 0) { 579 ret = smu_cmn_send_smc_msg_with_param(smu, 580 SMU_MSG_GetEnabledSmuFeatures, 581 0, 582 feature_mask_low); 583 if (ret) 584 return ret; 585 586 ret = smu_cmn_send_smc_msg_with_param(smu, 587 SMU_MSG_GetEnabledSmuFeatures, 588 1, 589 feature_mask_high); 590 } else { 591 ret = smu_cmn_send_smc_msg(smu, 592 SMU_MSG_GetEnabledSmuFeaturesHigh, 593 feature_mask_high); 594 if (ret) 595 return ret; 596 597 ret = smu_cmn_send_smc_msg(smu, 598 SMU_MSG_GetEnabledSmuFeaturesLow, 599 feature_mask_low); 600 } 601 602 return ret; 603 } 604 605 uint64_t smu_cmn_get_indep_throttler_status( 606 const unsigned long dep_status, 607 const uint8_t *throttler_map) 608 { 609 uint64_t indep_status = 0; 610 uint8_t dep_bit = 0; 611 612 for_each_set_bit(dep_bit, &dep_status, 32) 613 indep_status |= 1ULL << throttler_map[dep_bit]; 614 615 return indep_status; 616 } 617 618 int smu_cmn_feature_update_enable_state(struct smu_context *smu, 619 uint64_t feature_mask, 620 bool enabled) 621 { 622 int ret = 0; 623 624 if (enabled) { 625 ret = smu_cmn_send_smc_msg_with_param(smu, 626 SMU_MSG_EnableSmuFeaturesLow, 627 lower_32_bits(feature_mask), 628 NULL); 629 if (ret) 630 return ret; 631 ret = smu_cmn_send_smc_msg_with_param(smu, 632 SMU_MSG_EnableSmuFeaturesHigh, 633 upper_32_bits(feature_mask), 634 NULL); 635 } else { 636 ret = smu_cmn_send_smc_msg_with_param(smu, 637 SMU_MSG_DisableSmuFeaturesLow, 638 lower_32_bits(feature_mask), 639 NULL); 640 if (ret) 641 return ret; 642 ret = smu_cmn_send_smc_msg_with_param(smu, 643 SMU_MSG_DisableSmuFeaturesHigh, 644 upper_32_bits(feature_mask), 645 NULL); 646 } 647 648 return ret; 649 } 650 651 int smu_cmn_feature_set_enabled(struct smu_context *smu, 652 enum smu_feature_mask mask, 653 bool enable) 654 { 655 int feature_id; 656 657 feature_id = smu_cmn_to_asic_specific_index(smu, 658 CMN2ASIC_MAPPING_FEATURE, 659 mask); 660 if (feature_id < 0) 661 return -EINVAL; 662 663 return smu_cmn_feature_update_enable_state(smu, 664 1ULL << feature_id, 665 enable); 666 } 667 668 #undef __SMU_DUMMY_MAP 669 #define __SMU_DUMMY_MAP(fea) #fea 670 static const char* __smu_feature_names[] = { 671 SMU_FEATURE_MASKS 672 }; 673 674 static const char *smu_get_feature_name(struct smu_context *smu, 675 enum smu_feature_mask feature) 676 { 677 if (feature < 0 || feature >= SMU_FEATURE_COUNT) 678 return "unknown smu feature"; 679 return __smu_feature_names[feature]; 680 } 681 682 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, 683 char *buf) 684 { 685 int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; 686 uint64_t feature_mask; 687 int i, feature_index; 688 uint32_t count = 0; 689 size_t size = 0; 690 691 if (__smu_get_enabled_features(smu, &feature_mask)) 692 return 0; 693 694 size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", 695 upper_32_bits(feature_mask), lower_32_bits(feature_mask)); 696 697 memset(sort_feature, -1, sizeof(sort_feature)); 698 699 for (i = 0; i < SMU_FEATURE_COUNT; i++) { 700 feature_index = smu_cmn_to_asic_specific_index(smu, 701 CMN2ASIC_MAPPING_FEATURE, 702 i); 703 if (feature_index < 0) 704 continue; 705 706 sort_feature[feature_index] = i; 707 } 708 709 size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n", 710 "No", "Feature", "Bit", "State"); 711 712 for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) { 713 if (sort_feature[feature_index] < 0) 714 continue; 715 716 size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n", 717 count++, 718 smu_get_feature_name(smu, sort_feature[feature_index]), 719 feature_index, 720 !!test_bit(feature_index, (unsigned long *)&feature_mask) ? 721 "enabled" : "disabled"); 722 } 723 724 return size; 725 } 726 727 int smu_cmn_set_pp_feature_mask(struct smu_context *smu, 728 uint64_t new_mask) 729 { 730 int ret = 0; 731 uint64_t feature_mask; 732 uint64_t feature_2_enabled = 0; 733 uint64_t feature_2_disabled = 0; 734 735 ret = __smu_get_enabled_features(smu, &feature_mask); 736 if (ret) 737 return ret; 738 739 feature_2_enabled = ~feature_mask & new_mask; 740 feature_2_disabled = feature_mask & ~new_mask; 741 742 if (feature_2_enabled) { 743 ret = smu_cmn_feature_update_enable_state(smu, 744 feature_2_enabled, 745 true); 746 if (ret) 747 return ret; 748 } 749 if (feature_2_disabled) { 750 ret = smu_cmn_feature_update_enable_state(smu, 751 feature_2_disabled, 752 false); 753 if (ret) 754 return ret; 755 } 756 757 return ret; 758 } 759 760 /** 761 * smu_cmn_disable_all_features_with_exception - disable all dpm features 762 * except this specified by 763 * @mask 764 * 765 * @smu: smu_context pointer 766 * @mask: the dpm feature which should not be disabled 767 * SMU_FEATURE_COUNT: no exception, all dpm features 768 * to disable 769 * 770 * Returns: 771 * 0 on success or a negative error code on failure. 772 */ 773 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, 774 enum smu_feature_mask mask) 775 { 776 uint64_t features_to_disable = U64_MAX; 777 int skipped_feature_id; 778 779 if (mask != SMU_FEATURE_COUNT) { 780 skipped_feature_id = smu_cmn_to_asic_specific_index(smu, 781 CMN2ASIC_MAPPING_FEATURE, 782 mask); 783 if (skipped_feature_id < 0) 784 return -EINVAL; 785 786 features_to_disable &= ~(1ULL << skipped_feature_id); 787 } 788 789 return smu_cmn_feature_update_enable_state(smu, 790 features_to_disable, 791 0); 792 } 793 794 int smu_cmn_get_smc_version(struct smu_context *smu, 795 uint32_t *if_version, 796 uint32_t *smu_version) 797 { 798 int ret = 0; 799 800 if (!if_version && !smu_version) 801 return -EINVAL; 802 803 if (smu->smc_fw_if_version && smu->smc_fw_version) 804 { 805 if (if_version) 806 *if_version = smu->smc_fw_if_version; 807 808 if (smu_version) 809 *smu_version = smu->smc_fw_version; 810 811 return 0; 812 } 813 814 if (if_version) { 815 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); 816 if (ret) 817 return ret; 818 819 smu->smc_fw_if_version = *if_version; 820 } 821 822 if (smu_version) { 823 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); 824 if (ret) 825 return ret; 826 827 smu->smc_fw_version = *smu_version; 828 } 829 830 return ret; 831 } 832 833 int smu_cmn_update_table(struct smu_context *smu, 834 enum smu_table_id table_index, 835 int argument, 836 void *table_data, 837 bool drv2smu) 838 { 839 struct smu_table_context *smu_table = &smu->smu_table; 840 struct amdgpu_device *adev = smu->adev; 841 struct smu_table *table = &smu_table->driver_table; 842 int table_id = smu_cmn_to_asic_specific_index(smu, 843 CMN2ASIC_MAPPING_TABLE, 844 table_index); 845 uint32_t table_size; 846 int ret = 0; 847 if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) 848 return -EINVAL; 849 850 table_size = smu_table->tables[table_index].size; 851 852 if (drv2smu) { 853 memcpy(table->cpu_addr, table_data, table_size); 854 /* 855 * Flush hdp cache: to guard the content seen by 856 * GPU is consitent with CPU. 857 */ 858 amdgpu_asic_flush_hdp(adev, NULL); 859 } 860 861 ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? 862 SMU_MSG_TransferTableDram2Smu : 863 SMU_MSG_TransferTableSmu2Dram, 864 table_id | ((argument & 0xFFFF) << 16), 865 NULL); 866 if (ret) 867 return ret; 868 869 if (!drv2smu) { 870 amdgpu_asic_invalidate_hdp(adev, NULL); 871 memcpy(table_data, table->cpu_addr, table_size); 872 } 873 874 return 0; 875 } 876 877 int smu_cmn_write_watermarks_table(struct smu_context *smu) 878 { 879 void *watermarks_table = smu->smu_table.watermarks_table; 880 881 if (!watermarks_table) 882 return -EINVAL; 883 884 return smu_cmn_update_table(smu, 885 SMU_TABLE_WATERMARKS, 886 0, 887 watermarks_table, 888 true); 889 } 890 891 int smu_cmn_write_pptable(struct smu_context *smu) 892 { 893 void *pptable = smu->smu_table.driver_pptable; 894 895 return smu_cmn_update_table(smu, 896 SMU_TABLE_PPTABLE, 897 0, 898 pptable, 899 true); 900 } 901 902 int smu_cmn_get_metrics_table(struct smu_context *smu, 903 void *metrics_table, 904 bool bypass_cache) 905 { 906 struct smu_table_context *smu_table= &smu->smu_table; 907 uint32_t table_size = 908 smu_table->tables[SMU_TABLE_SMU_METRICS].size; 909 int ret = 0; 910 911 if (bypass_cache || 912 !smu_table->metrics_time || 913 time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { 914 ret = smu_cmn_update_table(smu, 915 SMU_TABLE_SMU_METRICS, 916 0, 917 smu_table->metrics_table, 918 false); 919 if (ret) { 920 dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); 921 return ret; 922 } 923 smu_table->metrics_time = jiffies; 924 } 925 926 if (metrics_table) 927 memcpy(metrics_table, smu_table->metrics_table, table_size); 928 929 return 0; 930 } 931 932 int smu_cmn_get_combo_pptable(struct smu_context *smu) 933 { 934 void *pptable = smu->smu_table.combo_pptable; 935 936 return smu_cmn_update_table(smu, 937 SMU_TABLE_COMBO_PPTABLE, 938 0, 939 pptable, 940 false); 941 } 942 943 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) 944 { 945 struct metrics_table_header *header = (struct metrics_table_header *)table; 946 uint16_t structure_size; 947 948 #define METRICS_VERSION(a, b) ((a << 16) | b ) 949 950 switch (METRICS_VERSION(frev, crev)) { 951 case METRICS_VERSION(1, 0): 952 structure_size = sizeof(struct gpu_metrics_v1_0); 953 break; 954 case METRICS_VERSION(1, 1): 955 structure_size = sizeof(struct gpu_metrics_v1_1); 956 break; 957 case METRICS_VERSION(1, 2): 958 structure_size = sizeof(struct gpu_metrics_v1_2); 959 break; 960 case METRICS_VERSION(1, 3): 961 structure_size = sizeof(struct gpu_metrics_v1_3); 962 break; 963 case METRICS_VERSION(2, 0): 964 structure_size = sizeof(struct gpu_metrics_v2_0); 965 break; 966 case METRICS_VERSION(2, 1): 967 structure_size = sizeof(struct gpu_metrics_v2_1); 968 break; 969 case METRICS_VERSION(2, 2): 970 structure_size = sizeof(struct gpu_metrics_v2_2); 971 break; 972 default: 973 return; 974 } 975 976 #undef METRICS_VERSION 977 978 memset(header, 0xFF, structure_size); 979 980 header->format_revision = frev; 981 header->content_revision = crev; 982 header->structure_size = structure_size; 983 984 } 985 986 int smu_cmn_set_mp1_state(struct smu_context *smu, 987 enum pp_mp1_state mp1_state) 988 { 989 enum smu_message_type msg; 990 int ret; 991 992 switch (mp1_state) { 993 case PP_MP1_STATE_SHUTDOWN: 994 msg = SMU_MSG_PrepareMp1ForShutdown; 995 break; 996 case PP_MP1_STATE_UNLOAD: 997 msg = SMU_MSG_PrepareMp1ForUnload; 998 break; 999 case PP_MP1_STATE_RESET: 1000 msg = SMU_MSG_PrepareMp1ForReset; 1001 break; 1002 case PP_MP1_STATE_NONE: 1003 default: 1004 return 0; 1005 } 1006 1007 ret = smu_cmn_send_smc_msg(smu, msg, NULL); 1008 if (ret) 1009 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); 1010 1011 return ret; 1012 } 1013 1014 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) 1015 { 1016 struct pci_dev *p = NULL; 1017 bool snd_driver_loaded; 1018 1019 /* 1020 * If the ASIC comes with no audio function, we always assume 1021 * it is "enabled". 1022 */ 1023 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 1024 adev->pdev->bus->number, 1); 1025 if (!p) 1026 return true; 1027 1028 snd_driver_loaded = pci_is_enabled(p) ? true : false; 1029 1030 pci_dev_put(p); 1031 1032 return snd_driver_loaded; 1033 } 1034