/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <linux/dma-mapping.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"

#include "amdgpu_ras.h"

static void psp_set_funcs(struct amdgpu_device *adev);

static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus
 *   - Navi12 and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	psp->pmfw_centralized_cstate_management = false;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if ((adev->asic_type == CHIP_ARCTURUS) ||
	    (adev->asic_type >= CHIP_NAVI12))
		psp->pmfw_centralized_cstate_management = true;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	psp_set_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_RAVEN:
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case CHIP_RENOIR:
		psp_v12_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	return 0;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	ret = psp_init_microcode(psp);
	if (ret) {
		DRM_ERROR("Failed to load psp firmware!\n");
		return ret;
	}

	ret = psp_mem_training_init(psp);
	if (ret) {
		DRM_ERROR("Failed to initialize memory training!\n");
		return ret;
	}
	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_NAVI10) {
		ret = psp_sysfs_init(adev);
		if (ret)
			return ret;
	}

	return 0;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	psp_mem_training_fini(&adev->psp);
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	if (adev->psp.ta_fw) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
	}

	if (adev->asic_type == CHIP_NAVI10)
		psp_sysfs_fini(adev);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 2000;
	bool ras_intr = false;
	bool skip_unsupport = false;

	mutex_lock(&psp->mutex);

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		mutex_unlock(&psp->mutex);
		return ret;
	}

	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and lock
		 * resources should be released for the psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		msleep(1);
		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);

	/* In some cases, the psp response status is not 0 even if there is no
	 * problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and we don't
	 * return -EINVAL in that case.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			DRM_WARN("failed to load ucode id (%d) ",
				 ucode->ucode_id);
		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
			 psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		if (!timeout) {
			mutex_unlock(&psp->mutex);
			return -EINVAL;
		}
	}

	/* get xGMI session id from response buffer */
	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}
	mutex_unlock(&psp->mutex);

	return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, uint32_t size)
{
	if (psp_support_vmr_ring(psp))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue a LOAD TOC command to PSP to parse the toc and calculate the TMR size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* Copy toc to psp firmware private buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
	kfree(cmd);
	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret;
	uint32_t tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", e.g. the start address is an integer multiple
	 * of the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver is
	 * uninitialized.
	 */
	tmr_size = PSP_TMR_SIZE;

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc_start_addr &&
	    psp->toc_bin_size &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			DRM_ERROR("Failed to load toc\n");
			return ret;
		}
	}

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);

	return ret;
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
			     amdgpu_bo_size(psp->tmr_bo));
	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t asd_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_len = size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}

static int psp_asd_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* If the PSP version doesn't match the ASD version, ASD loading
	 * will fail. Add a workaround to bypass it for sriov for now.
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
				  psp->asd_ucode_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret) {
		psp->asd_context.asd_initialized = true;
		psp->asd_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

static int psp_asd_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.asd_initialized)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		psp->asd_context.asd_initialized = false;

	kfree(cmd);

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd = NULL;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);
	return ret;
}

static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     uint32_t ta_bin_size,
				     uint64_t ta_shared_mc,
				     uint32_t ta_shared_size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = ta_bin_size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}

static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for xgmi ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->xgmi_context.xgmi_shared_bo,
				      &psp->xgmi_context.xgmi_shared_mc_addr,
				      &psp->xgmi_context.xgmi_shared_buf);

	return ret;
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  uint32_t session_id)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static int psp_xgmi_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_xgmi_ucode_size,
				 psp->xgmi_context.xgmi_shared_mc_addr,
				 PSP_XGMI_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->xgmi_context.initialized = 1;
		psp->xgmi_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_xgmi_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus */
	if (adev->asic_type == CHIP_ARCTURUS)
		return 0;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->xgmi_context.initialized)
		return 0;

	ret = psp_xgmi_unload(psp);
	if (ret)
		return ret;

	psp->xgmi_context.initialized = 0;

	/* free xgmi shared memory */
	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
			      &psp->xgmi_context.xgmi_shared_mc_addr,
			      &psp->xgmi_context.xgmi_shared_buf);

	return 0;
}

int psp_xgmi_initialize(struct psp_context *psp)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->adev->psp.ta_fw ||
	    !psp->adev->psp.ta_xgmi_ucode_size ||
	    !psp->adev->psp.ta_xgmi_start_addr)
		return -ENOENT;

	if (!psp->xgmi_context.initialized) {
		ret = psp_xgmi_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_xgmi_load(psp);
	if (ret)
		return ret;

	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);

	return ret;
}

// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ras ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->ras.ras_shared_bo,
				      &psp->ras.ras_shared_mc_addr,
				      &psp->ras.ras_shared_buf);

	return ret;
}

static int psp_ras_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_ras_ucode_size,
				 psp->ras.ras_shared_mc_addr,
				 PSP_RAS_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->ras.ras_initialized = true;
		psp->ras.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_ras_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
}

static int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras.ras_initialized)
		return 0;

	ret = psp_ras_unload(psp);
	if (ret)
		return ret;

	psp->ras.ras_initialized = false;

	/* free ras shared memory */
	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
			      &psp->ras.ras_shared_mc_addr,
			      &psp->ras.ras_shared_buf);

	return 0;
}

static int psp_ras_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_ras_ucode_size ||
	    !psp->adev->psp.ta_ras_start_addr) {
		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (!psp->ras.ras_initialized) {
		ret = psp_ras_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_ras_load(psp);
	if (ret)
		return ret;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for hdcp ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->hdcp_context.hdcp_shared_bo,
				      &psp->hdcp_context.hdcp_shared_mc_addr,
				      &psp->hdcp_context.hdcp_shared_buf);

	return ret;
}

static int psp_hdcp_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
	       psp->ta_hdcp_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_hdcp_ucode_size,
				 psp->hdcp_context.hdcp_shared_mc_addr,
				 PSP_HDCP_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->hdcp_context.hdcp_initialized = true;
		psp->hdcp_context.session_id = cmd->resp.session_id;
		mutex_init(&psp->hdcp_context.mutex);
	}

	kfree(cmd);

	return ret;
}

static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_hdcp_ucode_size ||
	    !psp->adev->psp.ta_hdcp_start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	if (!psp->hdcp_context.hdcp_initialized) {
		ret = psp_hdcp_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_hdcp_load(psp);
	if (ret)
		return ret;

	return 0;
}

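/* Unload the HDCP TA: send an UNLOAD_TA command for the session created in psp_hdcp_load(). */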
static int psp_hdcp_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.hdcp_initialized)
		return 0;

	ret = psp_hdcp_unload(psp);
	if (ret)
		return ret;

	psp->hdcp_context.hdcp_initialized = false;

	/* free hdcp shared memory */
	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
			      &psp->hdcp_context.hdcp_shared_mc_addr,
			      &psp->hdcp_context.hdcp_shared_buf);

	return 0;
}
// HDCP end

// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for dtm ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->dtm_context.dtm_shared_bo,
				      &psp->dtm_context.dtm_shared_mc_addr,
				      &psp->dtm_context.dtm_shared_buf);

	return ret;
}

static int psp_dtm_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_dtm_ucode_size,
				 psp->dtm_context.dtm_shared_mc_addr,
				 PSP_DTM_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->dtm_context.dtm_initialized = true;
		psp->dtm_context.session_id = cmd->resp.session_id;
		mutex_init(&psp->dtm_context.mutex);
	}

	kfree(cmd);

	return ret;
}

static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_dtm_ucode_size ||
	    !psp->adev->psp.ta_dtm_start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	if (!psp->dtm_context.dtm_initialized) {
		ret = psp_dtm_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_dtm_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_dtm_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}

static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.dtm_initialized)
		return 0;

	ret = psp_dtm_unload(psp);
	if (ret)
		return ret;

	psp->dtm_context.dtm_initialized = false;

	/* free dtm shared memory */
	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
			      &psp->dtm_context.dtm_shared_mc_addr,
			      &psp->dtm_context.dtm_shared_buf);

	return 0;
}
// DTM end

static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (psp->kdb_bin_size &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				DRM_ERROR("PSP load kdb failed!\n");
				return ret;
			}
		}

		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_tmr_init(psp);
	if (ret) {
		DRM_ERROR("PSP tmr init failed!\n");
		return ret;
	}

	/*
	 * For those ASICs with DF Cstate management centralized
	 * to PMFW, TMR setup should be performed after PMFW is
	 * loaded and before other non-psp firmware is loaded.
	 */
	if (!psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

	return 0;
}

static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
			   enum psp_gfx_fw_type *type)
{
	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
		*type = GFX_FW_TYPE_SDMA0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		*type = GFX_FW_TYPE_SDMA1;
		break;
	case AMDGPU_UCODE_ID_SDMA2:
		*type = GFX_FW_TYPE_SDMA2;
		break;
	case AMDGPU_UCODE_ID_SDMA3:
		*type = GFX_FW_TYPE_SDMA3;
		break;
	case AMDGPU_UCODE_ID_SDMA4:
		*type = GFX_FW_TYPE_SDMA4;
		break;
	case AMDGPU_UCODE_ID_SDMA5:
		*type = GFX_FW_TYPE_SDMA5;
		break;
	case AMDGPU_UCODE_ID_SDMA6:
		*type = GFX_FW_TYPE_SDMA6;
		break;
	case AMDGPU_UCODE_ID_SDMA7:
		*type = GFX_FW_TYPE_SDMA7;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		*type = GFX_FW_TYPE_CP_CE;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		*type = GFX_FW_TYPE_CP_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		*type = GFX_FW_TYPE_CP_ME;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME1;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME2;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		*type = GFX_FW_TYPE_RLC_G;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
	case AMDGPU_UCODE_ID_UVD:
		*type = GFX_FW_TYPE_UVD;
		break;
	case AMDGPU_UCODE_ID_UVD1:
		*type = GFX_FW_TYPE_UVD1;
		break;
	case AMDGPU_UCODE_ID_VCE:
		*type = GFX_FW_TYPE_VCE;
		break;
	case AMDGPU_UCODE_ID_VCN:
		*type = GFX_FW_TYPE_VCN;
		break;
	case AMDGPU_UCODE_ID_VCN1:
		*type = GFX_FW_TYPE_VCN1;
		break;
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		*type = GFX_FW_TYPE_DMCU_ERAM;
		break;
	case AMDGPU_UCODE_ID_DMCU_INTV:
		*type = GFX_FW_TYPE_DMCU_ISR;
		break;
	case AMDGPU_UCODE_ID_VCN0_RAM:
		*type = GFX_FW_TYPE_VCN0_RAM;
		break;
	case AMDGPU_UCODE_ID_VCN1_RAM:
		*type = GFX_FW_TYPE_VCN1_RAM;
		break;
	case AMDGPU_UCODE_ID_DMCUB:
		*type = GFX_FW_TYPE_DMUB;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		return -EINVAL;
	}

	return 0;
}

static void psp_print_fw_hdr(struct psp_context *psp,
			     struct amdgpu_firmware_info *ucode)
{
	struct amdgpu_device *adev = psp->adev;
	struct common_firmware_header *hdr;

	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
	case AMDGPU_UCODE_ID_SDMA1:
	case AMDGPU_UCODE_ID_SDMA2:
	case AMDGPU_UCODE_ID_SDMA3:
	case AMDGPU_UCODE_ID_SDMA4:
	case AMDGPU_UCODE_ID_SDMA5:
	case AMDGPU_UCODE_ID_SDMA6:
	case AMDGPU_UCODE_ID_SDMA7:
		hdr = (struct common_firmware_header *)
			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
		amdgpu_ucode_print_sdma_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
		amdgpu_ucode_print_rlc_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_SMC:
		hdr = (struct common_firmware_header *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(hdr);
		break;
	default:
		break;
	}
}

static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}

static int psp_execute_np_fw_load(struct psp_context *psp,
				  struct amdgpu_firmware_info *ucode)
{
	int ret = 0;

	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
	if (ret)
		return ret;

	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
				 psp->fence_buf_mc_addr);

	return ret;
}

static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	if (psp->autoload_supported ||
	    psp->pmfw_centralized_cstate_management) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		if (!ucode->fw || amdgpu_sriov_vf(adev))
			goto out;

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;
	}

	if (psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

out:
	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (!ucode->fw)
			continue;

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    (psp_smu_reload_quirk(psp) ||
		     psp->autoload_supported ||
		     psp->pmfw_centralized_cstate_management))
			continue;

		if (amdgpu_sriov_vf(adev) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
			/* skip ucode loading in SRIOV VF */
			continue;

		if (psp->autoload_supported &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
			/* skip mec JT when autoload is enabled */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
#if 0
		/* check if firmware loaded successfully */
		if (!amdgpu_psp_check_fw_loading_status(adev, i))
			return -EINVAL;
#endif
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	return 0;

failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini.
	 */
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only used on hw_init once; it is not needed on
	 * resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	void *tmr_buf;
	void **pptr;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}

static int psp_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	DRM_INFO("PSP is resuming...\n");

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	mutex_lock(&adev->firmware.mutex);

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	mutex_unlock(&adev->firmware.mutex);

	return 0;

failed:
	DRM_ERROR("PSP resume failed\n");
	mutex_unlock(&adev->firmware.mutex);
	return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
	int ret;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	mutex_lock(&adev->psp.mutex);
	ret = psp_mode1_reset(&adev->psp);
	mutex_unlock(&adev->psp.mutex);

	return ret;
}

int psp_rlc_autoload_start(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	kfree(cmd);
	return ret;
}

int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
			uint64_t cmd_gpu_addr, int cmd_size)
{
	struct amdgpu_firmware_info ucode = {0};

	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
				    AMDGPU_UCODE_ID_VCN0_RAM;
	ucode.mc_addr = cmd_gpu_addr;
	ucode.ucode_size = cmd_size;

	return psp_execute_np_fw_load(&adev->psp, &ucode);
}

int psp_ring_cmd_submit(struct psp_context *psp,
			uint64_t cmd_buf_mc_addr,
			uint64_t fence_mc_addr,
			int index)
{
	unsigned int psp_write_ptr_reg = 0;
	struct psp_gfx_rb_frame *write_frame;
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
	struct amdgpu_device *adev = psp->adev;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

	/* KM (GPCOM) prepare write pointer */
	psp_write_ptr_reg = psp_ring_get_wptr(psp);

	/* Update KM RB frame pointer to new frame */
	/* write_frame ptr increments by size of rb_frame in bytes */
	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
	if ((psp_write_ptr_reg % ring_size_dw) == 0)
		write_frame = ring_buffer_start;
	else
		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
	/* Check invalid write_frame ptr address */
	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
			  ring_buffer_start, ring_buffer_end, write_frame);
		DRM_ERROR("write_frame is pointing to address out of bounds\n");
		return -EINVAL;
	}

	/* Initialize KM RB frame */
	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));

	/* Update KM RB frame */
	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	write_frame->fence_value = index;
	amdgpu_asic_flush_hdp(adev, NULL);

	/* Update the write Pointer in DWORDs */
	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
	psp_ring_set_wptr(psp, psp_write_ptr_reg);
	return 0;
}

static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
					enum AMDGPU_UCODE_ID ucode_type)
{
	struct amdgpu_firmware_info *ucode = NULL;

	if (!adev->firmware.fw_size)
		return false;

	ucode = &adev->firmware.ucode[ucode_type];
	if (!ucode->fw || !ucode->ucode_size)
		return false;

	return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
}

static int psp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t fw_ver;
	int ret;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
		return ret;
	}

	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int ret;
	char fw_name[100];
	const struct firmware *usbc_pd_fw;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");
		return -EBUSY;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
	if (ret)
		goto fail;

	/* We need contiguous physical mem to place the FW for psp to access */
	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);

	ret = dma_mapping_error(adev->dev, dma_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	/*
	 * x86 specific workaround.
	 * Without it the buffer is invisible in PSP.
	 *
	 * TODO Remove once PSP starts snooping CPU cache
	 */
#ifdef CONFIG_X86
	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
#endif

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
	mutex_unlock(&adev->psp.mutex);

rel_buf:
	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
	release_firmware(usbc_pd_fw);

fail:
	if (ret) {
		DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
		return ret;
	}

	return count;
}

static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

static int psp_sysfs_init(struct amdgpu_device *adev)
{
	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);

	if (ret)
		DRM_ERROR("Failed to create USBC PD FW control file!");

	return ret;
}

static void psp_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}

static const struct amdgpu_psp_funcs psp_funcs = {
	.check_fw_loading_status = psp_check_fw_loading_status,
};

static void psp_set_funcs(struct amdgpu_device *adev)
{
	if (!adev->firmware.funcs)
		adev->firmware.funcs = &psp_funcs;
}

const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};