1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <linux/dma-mapping.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"

#include "amdgpu_ras.h"

static void psp_set_funcs(struct amdgpu_device *adev);

static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 *   - Load KDB
 *   - Load SYS_DRV
 *   - Load tOS
 *   - Load PMFW
 *   - Setup TMR
 *   - Load other non-psp fw
 *   - Load ASD
 *   - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus
 *   - Navi12 and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	psp->pmfw_centralized_cstate_management = false;

	/* Only bare-metal dGPUs use the PMFW-centralized sequence:
	 * SRIOV VFs and APUs keep the legacy loading order.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if ((adev->asic_type == CHIP_ARCTURUS) ||
	    (adev->asic_type >= CHIP_NAVI12))
		psp->pmfw_centralized_cstate_management = true;
}

/* Select ASIC-specific PSP callbacks and capabilities for this device. */
static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	psp_set_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_RAVEN:
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		psp_v11_0_set_psp_funcs(psp);
		/* RLC autoload is only supported on the Navi1x family here. */
		psp->autoload_supported = true;
		break;
	case CHIP_RENOIR:
		psp_v12_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	return 0;
}

/* Load PSP microcode and run cold-boot memory training.
 * NAVI10 additionally registers a sysfs interface.
 */
static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	ret = psp_init_microcode(psp);
	if (ret) {
		DRM_ERROR("Failed to load psp firmware!\n");
		return ret;
	}

	ret = psp_mem_training_init(psp);
	if (ret) {
		DRM_ERROR("Failed to initialize memory training!\n");
		return ret;
	}
	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_NAVI10) {
		ret = psp_sysfs_init(adev);
		if (ret) {
			return ret;
		}
	}

	return 0;
}

/* Release firmware images and sysfs resources acquired in psp_sw_init(). */
static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	psp_mem_training_fini(&adev->psp);
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	if (adev->psp.ta_fw) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
	}

	if (adev->asic_type == CHIP_NAVI10)
		psp_sysfs_fini(adev);

	return 0;
}

/* Poll PSP register reg_index for up to adev->usec_timeout iterations.
 * When check_changed is false, succeed once (val & mask) == reg_val;
 * when true, succeed once the register moves away from reg_val.
 * Returns 0 on success, -ETIME on timeout.
 */
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

static int
psp_cmd_submit_buf(struct
 psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 2000;
	bool ras_intr = false;
	bool skip_unsupport = false;

	/* Serializes all command submissions on the single KM ring. */
	mutex_lock(&psp->mutex);

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	/* Each submission gets a unique fence value to poll for completion. */
	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		mutex_unlock(&psp->mutex);
		return ret;
	}

	/* Invalidate HDP so the CPU sees the fence value PSP writes. */
	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		msleep(1);
		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);

	/* In some cases, psp response status is not 0 even there is no
	 * problem while the command is submitted. Some version of PSP FW
	 * doesn't write 0 to that field.
	 * So here we would like to only print a warning instead of an error
	 * during psp initialization to avoid breaking hw_init and it doesn't
	 * return -EINVAL.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			DRM_WARN("failed to load ucode id (%d) ",
				 ucode->ucode_id);
		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
			 psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		/* A real timeout (fence never written) is a hard failure. */
		if (!timeout) {
			mutex_unlock(&psp->mutex);
			return -EINVAL;
		}
	}

	/* get xGMI session id from response buffer */
	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}
	mutex_unlock(&psp->mutex);

	return ret;
}

/* Build a TMR setup command; the VMR variant is used when the ring
 * supports VMR (SRIOV).
 */
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, uint32_t size)
{
	if (psp_support_vmr_ring(psp))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
}

/* Build a LOAD_TOC command pointing PSP at the ToC copied into the
 * private firmware buffer.
 */
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* Copy toc to psp firmware private buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->toc_start_addr,
psp->toc_bin_size); 306 307 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); 308 309 ret = psp_cmd_submit_buf(psp, NULL, cmd, 310 psp->fence_buf_mc_addr); 311 if (!ret) 312 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 313 kfree(cmd); 314 return ret; 315 } 316 317 /* Set up Trusted Memory Region */ 318 static int psp_tmr_init(struct psp_context *psp) 319 { 320 int ret; 321 int tmr_size; 322 void *tmr_buf; 323 void **pptr; 324 325 /* 326 * According to HW engineer, they prefer the TMR address be "naturally 327 * aligned" , e.g. the start address be an integer divide of TMR size. 328 * 329 * Note: this memory need be reserved till the driver 330 * uninitializes. 331 */ 332 tmr_size = PSP_TMR_SIZE; 333 334 /* For ASICs support RLC autoload, psp will parse the toc 335 * and calculate the total size of TMR needed */ 336 if (!amdgpu_sriov_vf(psp->adev) && 337 psp->toc_start_addr && 338 psp->toc_bin_size && 339 psp->fw_pri_buf) { 340 ret = psp_load_toc(psp, &tmr_size); 341 if (ret) { 342 DRM_ERROR("Failed to load toc\n"); 343 return ret; 344 } 345 } 346 347 pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; 348 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, 349 AMDGPU_GEM_DOMAIN_VRAM, 350 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 351 352 return ret; 353 } 354 355 static int psp_tmr_load(struct psp_context *psp) 356 { 357 int ret; 358 struct psp_gfx_cmd_resp *cmd; 359 360 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 361 if (!cmd) 362 return -ENOMEM; 363 364 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 365 amdgpu_bo_size(psp->tmr_bo)); 366 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 367 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 368 369 ret = psp_cmd_submit_buf(psp, NULL, cmd, 370 psp->fence_buf_mc_addr); 371 372 kfree(cmd); 373 374 return ret; 375 } 376 377 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 378 uint64_t asd_mc, uint32_t size) 379 { 380 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; 381 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); 382 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); 383 cmd->cmd.cmd_load_ta.app_len = size; 384 385 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; 386 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; 387 cmd->cmd.cmd_load_ta.cmd_buf_len = 0; 388 } 389 390 static int psp_asd_load(struct psp_context *psp) 391 { 392 int ret; 393 struct psp_gfx_cmd_resp *cmd; 394 395 /* If PSP version doesn't match ASD version, asd loading will be failed. 396 * add workaround to bypass it for sriov now. 
	 * TODO: add version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Stage the ASD binary in the PSP private firmware buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
				  psp->asd_ucode_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret) {
		psp->asd_context.asd_initialized = true;
		psp->asd_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

/* Build an UNLOAD_TA command for the given TA session. */
static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

/* Unload the ASD trusted application; no-op when ASD was never loaded
 * or when running as an SRIOV VF.
 */
static int psp_asd_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.asd_initialized)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		psp->asd_context.asd_initialized = false;

	kfree(cmd);

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

/* Ask PSP to program one of the whitelisted registers on our behalf. */
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd = NULL;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);
	return ret;
}

/* Build a LOAD_TA command: TA binary location/size plus the TA<->driver
 * shared buffer location/size.
 */
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     uint32_t ta_bin_size,
				     uint64_t ta_shared_mc,
				     uint32_t ta_shared_size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = ta_bin_size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}

static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for xgmi ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->xgmi_context.xgmi_shared_bo,
				      &psp->xgmi_context.xgmi_shared_mc_addr,
				      &psp->xgmi_context.xgmi_shared_buf);

	return ret;
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

/* Dispatch a TA-specific command to an already-loaded TA session. */
int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  uint32_t session_id)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static int psp_xgmi_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Stage the XGMI TA binary in the PSP private firmware buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_xgmi_ucode_size,
				 psp->xgmi_context.xgmi_shared_mc_addr,
				 PSP_XGMI_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->xgmi_context.initialized = 1;
		psp->xgmi_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_xgmi_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus */
	if (adev->asic_type == CHIP_ARCTURUS)
		return 0;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}

/* Unload the XGMI TA and release its shared buffer. */
int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->xgmi_context.initialized)
		return 0;

	ret = psp_xgmi_unload(psp);
	if (ret)
		return ret;

	psp->xgmi_context.initialized = 0;

	/* free xgmi shared memory */
	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
			      &psp->xgmi_context.xgmi_shared_mc_addr,
			      &psp->xgmi_context.xgmi_shared_buf);

	return 0;
}

/* Allocate the shared buffer (first time only), load the XGMI TA and
 * initialize the session.  Returns -ENOENT when the XGMI TA firmware
 * is not available.
 */
int psp_xgmi_initialize(struct psp_context *psp)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->adev->psp.ta_fw ||
	    !psp->adev->psp.ta_xgmi_ucode_size ||
	    !psp->adev->psp.ta_xgmi_start_addr)
		return -ENOENT;

	if (!psp->xgmi_context.initialized) {
		ret = psp_xgmi_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_xgmi_load(psp);
	if (ret)
		return ret;

	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);

	return ret;
}

// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ras ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->ras.ras_shared_bo,
				      &psp->ras.ras_shared_mc_addr,
				      &psp->ras.ras_shared_buf);

	return ret;
}

static int psp_ras_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Stage the RAS TA binary in the PSP private firmware buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_ras_ucode_size,
				 psp->ras.ras_shared_mc_addr,
				 PSP_RAS_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->ras.ras_initialized = true;
		psp->ras.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_ras_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}

/* Enable or disable the RAS features described by @info via the RAS TA.
 * Returns -EINVAL on submission failure or when RAS is uninitialized,
 * otherwise the TA's ras_status.
 */
int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
}

/* Unload the RAS TA and release its shared buffer. */
static int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras.ras_initialized)
		return 0;

	ret = psp_ras_unload(psp);
	if (ret)
		return ret;

	psp->ras.ras_initialized = false;

	/* free ras shared memory */
	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
			      &psp->ras.ras_shared_mc_addr,
			      &psp->ras.ras_shared_buf);

	return 0;
}

/* Allocate the shared buffer (first time only) and load the RAS TA.
 * Missing RAS TA firmware is reported but not fatal.
 */
static int psp_ras_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_ras_ucode_size ||
	    !psp->adev->psp.ta_ras_start_addr) {
		dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
		return 0;
	}

	if (!psp->ras.ras_initialized) {
		ret = psp_ras_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_ras_load(psp);
	if (ret)
		return ret;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for hdcp ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->hdcp_context.hdcp_shared_bo,
				      &psp->hdcp_context.hdcp_shared_mc_addr,
				      &psp->hdcp_context.hdcp_shared_buf);

	return ret;
}

static int psp_hdcp_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Stage the HDCP TA binary in the PSP private firmware buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
	       psp->ta_hdcp_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_hdcp_ucode_size,
				 psp->hdcp_context.hdcp_shared_mc_addr,
				 PSP_HDCP_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->hdcp_context.hdcp_initialized = true;
		psp->hdcp_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}
/* Allocate the shared buffer (first time only) and load the HDCP TA.
 * Missing HDCP TA firmware is reported but not fatal.
 */
static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_hdcp_ucode_size ||
	    !psp->adev->psp.ta_hdcp_start_addr) {
		dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
		return 0;
	}

	if (!psp->hdcp_context.hdcp_initialized) {
		ret = psp_hdcp_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_hdcp_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_hdcp_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}

/* Unload the HDCP TA and release its shared buffer. */
static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.hdcp_initialized)
		return 0;

	ret = psp_hdcp_unload(psp);
	if (ret)
		return ret;

	psp->hdcp_context.hdcp_initialized = false;

	/* free hdcp shared memory */
	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
			      &psp->hdcp_context.hdcp_shared_mc_addr,
			      &psp->hdcp_context.hdcp_shared_buf);

	return 0;
}
// HDCP end

// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for dtm ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->dtm_context.dtm_shared_bo,
				      &psp->dtm_context.dtm_shared_mc_addr,
				      &psp->dtm_context.dtm_shared_buf);

	return ret;
}

static int psp_dtm_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Stage the DTM TA binary in the PSP private firmware buffer. */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_dtm_ucode_size,
				 psp->dtm_context.dtm_shared_mc_addr,
				 PSP_DTM_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->dtm_context.dtm_initialized = true;
		psp->dtm_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

/* Allocate the shared buffer (first time only) and load the DTM TA.
 * Missing DTM TA firmware is reported but not fatal.
 */
static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_dtm_ucode_size ||
	    !psp->adev->psp.ta_dtm_start_addr) {
		dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
		return 0;
	}

	if
 (!psp->dtm_context.dtm_initialized) {
		ret = psp_dtm_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_dtm_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_dtm_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}

/* Unload the DTM TA and release its shared buffer. */
static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.dtm_initialized)
		return 0;

	ret = psp_dtm_unload(psp);
	if (ret)
		return ret;

	psp->dtm_context.dtm_initialized = false;

	/* free dtm shared memory */
	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
			      &psp->dtm_context.dtm_shared_mc_addr,
			      &psp->dtm_context.dtm_shared_buf);

	return 0;
}
// DTM end

/* Bring up the PSP: bootloader stages (bare metal only), the KM ring,
 * and TMR init/load.  TMR load is deferred for ASICs with
 * PMFW-centralized Cstate management (done in psp_np_fw_load()).
 */
static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (psp->kdb_bin_size &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				DRM_ERROR("PSP load kdb failed!\n");
				return ret;
			}
		}

		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_tmr_init(psp);
	if (ret) {
		DRM_ERROR("PSP tmr init failed!\n");
		return ret;
	}

	/*
	 * For those ASICs with DF Cstate management centralized
	 * to PMFW, TMR setup should be performed after PMFW
	 * loaded and before other non-psp firmware loaded.
	 */
	if (!psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

	return 0;
}

/* Map an amdgpu ucode id to the PSP GFX firmware type. */
static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
			   enum psp_gfx_fw_type *type)
{
	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
		*type = GFX_FW_TYPE_SDMA0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		*type = GFX_FW_TYPE_SDMA1;
		break;
	case AMDGPU_UCODE_ID_SDMA2:
		*type = GFX_FW_TYPE_SDMA2;
		break;
	case AMDGPU_UCODE_ID_SDMA3:
		*type = GFX_FW_TYPE_SDMA3;
		break;
	case AMDGPU_UCODE_ID_SDMA4:
		*type = GFX_FW_TYPE_SDMA4;
		break;
	case AMDGPU_UCODE_ID_SDMA5:
		*type = GFX_FW_TYPE_SDMA5;
		break;
	case AMDGPU_UCODE_ID_SDMA6:
		*type = GFX_FW_TYPE_SDMA6;
		break;
	case AMDGPU_UCODE_ID_SDMA7:
		*type = GFX_FW_TYPE_SDMA7;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		*type = GFX_FW_TYPE_CP_CE;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		*type = GFX_FW_TYPE_CP_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		*type = GFX_FW_TYPE_CP_ME;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME1;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME2;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		*type = GFX_FW_TYPE_RLC_G;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
	case AMDGPU_UCODE_ID_UVD:
		*type = GFX_FW_TYPE_UVD;
		break;
	case AMDGPU_UCODE_ID_UVD1:
		*type = GFX_FW_TYPE_UVD1;
		break;
	case AMDGPU_UCODE_ID_VCE:
		*type = GFX_FW_TYPE_VCE;
		break;
	case AMDGPU_UCODE_ID_VCN:
		*type = GFX_FW_TYPE_VCN;
		break;
	case AMDGPU_UCODE_ID_VCN1:
		*type = GFX_FW_TYPE_VCN1;
		break;
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		*type = GFX_FW_TYPE_DMCU_ERAM;
		break;
	case AMDGPU_UCODE_ID_DMCU_INTV:
		*type = GFX_FW_TYPE_DMCU_ISR;
		break;
	case AMDGPU_UCODE_ID_VCN0_RAM:
		*type = GFX_FW_TYPE_VCN0_RAM;
		break;
	case AMDGPU_UCODE_ID_VCN1_RAM:
		*type = GFX_FW_TYPE_VCN1_RAM;
		break;
	case AMDGPU_UCODE_ID_DMCUB:
		*type = GFX_FW_TYPE_DMUB;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		return -EINVAL;
	}

	return 0;
}

/* Dump the header of the firmware image being loaded (debug aid). */
static void psp_print_fw_hdr(struct psp_context *psp,
			     struct amdgpu_firmware_info *ucode)
{
	struct amdgpu_device *adev = psp->adev;
	struct common_firmware_header *hdr;

	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
	case AMDGPU_UCODE_ID_SDMA1:
	case AMDGPU_UCODE_ID_SDMA2:
	case AMDGPU_UCODE_ID_SDMA3:
	case AMDGPU_UCODE_ID_SDMA4:
	case AMDGPU_UCODE_ID_SDMA5:
	case AMDGPU_UCODE_ID_SDMA6:
	case AMDGPU_UCODE_ID_SDMA7:
		hdr = (struct common_firmware_header *)
			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
		amdgpu_ucode_print_sdma_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
		amdgpu_ucode_print_rlc_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_SMC:
		hdr = (struct common_firmware_header *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(hdr);
		break;
	default:
		break;
	}
}

/* Fill a LOAD_IP_FW command describing the given ucode image. */
static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}

static int
psp_execute_np_fw_load(struct psp_context *psp,
		       struct amdgpu_firmware_info *ucode)
{
	int ret = 0;

	/* Describe the fw image in the shared command buffer, then submit
	 * it to the PSP over the KM ring and wait on the fence. */
	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
	if (ret)
		return ret;

	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
				 psp->fence_buf_mc_addr);

	return ret;
}

/*
 * psp_np_fw_load - load all non-PSP firmwares through the PSP.
 *
 * Ordering constraints handled here:
 *  - when the PSP autoloads fw or PMFW centralizes DF Cstate management,
 *    the SMU (SMC) image must be loaded first;
 *  - for PMFW-centralized ASICs the TMR load was deferred in
 *    psp_hw_start and is performed here, right after PMFW.
 */
static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device* adev = psp->adev;

	if (psp->autoload_supported ||
	    psp->pmfw_centralized_cstate_management) {
		/* No SMC image (or SRIOV VF, where the host owns SMU):
		 * skip straight to the generic loop. */
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		if (!ucode->fw || amdgpu_sriov_vf(adev))
			goto out;

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;
	}

	if (psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

out:
	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (!ucode->fw)
			continue;

		/* SMC already handled above (or via the reload quirk). */
		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    (psp_smu_reload_quirk(psp) ||
		     psp->autoload_supported ||
		     psp->pmfw_centralized_cstate_management))
			continue;

		if (amdgpu_sriov_vf(adev) &&
		   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
			/*skip ucode loading in SRIOV VF */
			continue;

		if (psp->autoload_supported &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
			/* skip mec JT when autoload is enabled */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
#if 0
		/* check if firmware loaded successfully */
		if (!amdgpu_psp_check_fw_loading_status(adev, i))
			return -EINVAL;
#endif
	}

	return 0;
}

/*
 * psp_load_fw - full PSP firmware bring-up: allocate the command/fence/fw
 * buffers, start the PSP (bootloader + ring + TMR), then push every
 * non-PSP firmware and the ASD/TA images through it.
 */
static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	/* Staging buffer the PSP reads fw images from (GTT, 1 MiB aligned). */
	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;
	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		/* NOTE(review): returns directly instead of "goto failed"
		 * like the other paths; equivalent today since "failed"
		 * only returns ret, but worth unifying. */
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		/* TA init failures are logged but not fatal to bring-up. */
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	return 0;

failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	return ret;
}

/* IP-block hw_init: place ucodes in the shared fw BO, then load via PSP.
 * Falls back to direct fw loading if the PSP path fails. */
static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is just used on hw_init only once, no need on
	 * resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

/* IP-block hw_fini: tear down TAs, ASD, the KM ring and every buffer
 * allocated in psp_load_fw (the deferred cleanup noted there). */
static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	void *tmr_buf;
	void **pptr;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	/* Under SRIOV the TMR BO has a CPU mapping that must be unmapped. */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

/* IP-block suspend: terminate all TAs and stop (not destroy) the ring
 * so psp_resume can restart it. */
static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return
ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}

/* IP-block resume: rerun memory training, restart the PSP and reload
 * every firmware and TA (buffers from hw_init are still alive). */
static int psp_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	DRM_INFO("PSP is resuming...\n");

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	mutex_lock(&adev->firmware.mutex);

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warning the XGMI session initialize failure
		 * Instead of stop driver initialization
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		/* As in psp_load_fw, TA failures are non-fatal. */
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	mutex_unlock(&adev->firmware.mutex);

	return 0;

failed:
	DRM_ERROR("PSP resume failed\n");
	mutex_unlock(&adev->firmware.mutex);
	return ret;
}

/* Trigger a PSP mode-1 reset; no-op unless fw loading goes through PSP. */
int psp_gpu_reset(struct amdgpu_device *adev)
{
	int ret;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	mutex_lock(&adev->psp.mutex);
	ret = psp_mode1_reset(&adev->psp);
	mutex_unlock(&adev->psp.mutex);

	return ret;
}

/* Ask the PSP to start RLC autoload; uses a locally allocated command
 * since the shared psp->cmd buffer may be in use. */
int psp_rlc_autoload_start(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	kfree(cmd);
	return ret;
}

/* Push a VCN SRAM update (instance selected by @inst_idx) through the
 * generic non-PSP fw load path using a synthetic ucode descriptor. */
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
			uint64_t cmd_gpu_addr, int cmd_size)
{
	struct amdgpu_firmware_info ucode = {0};

	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
		AMDGPU_UCODE_ID_VCN0_RAM;
	ucode.mc_addr = cmd_gpu_addr;
	ucode.ucode_size = cmd_size;

	return psp_execute_np_fw_load(&adev->psp, &ucode);
}

/*
 * psp_ring_cmd_submit - write one GPCOM frame (cmd buffer + fence
 * addresses, fence value @index) into the KM ring and advance the
 * write pointer.
 */
int psp_ring_cmd_submit(struct psp_context *psp,
			uint64_t cmd_buf_mc_addr,
			uint64_t fence_mc_addr,
			int index)
{
	unsigned int psp_write_ptr_reg = 0;
	struct psp_gfx_rb_frame *write_frame;
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
	struct amdgpu_device *adev = psp->adev;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

	/* KM (GPCOM) prepare write pointer */
	psp_write_ptr_reg = psp_ring_get_wptr(psp);

	/* Update KM RB frame pointer to new frame */
	/* write_frame ptr increments by size of rb_frame in bytes */
	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
	if ((psp_write_ptr_reg % ring_size_dw) == 0)
		write_frame =
ring_buffer_start; 1786 else 1787 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 1788 /* Check invalid write_frame ptr address */ 1789 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 1790 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 1791 ring_buffer_start, ring_buffer_end, write_frame); 1792 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 1793 return -EINVAL; 1794 } 1795 1796 /* Initialize KM RB frame */ 1797 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 1798 1799 /* Update KM RB frame */ 1800 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 1801 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 1802 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 1803 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 1804 write_frame->fence_value = index; 1805 amdgpu_asic_flush_hdp(adev, NULL); 1806 1807 /* Update the write Pointer in DWORDs */ 1808 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 1809 psp_ring_set_wptr(psp, psp_write_ptr_reg); 1810 return 0; 1811 } 1812 1813 static bool psp_check_fw_loading_status(struct amdgpu_device *adev, 1814 enum AMDGPU_UCODE_ID ucode_type) 1815 { 1816 struct amdgpu_firmware_info *ucode = NULL; 1817 1818 if (!adev->firmware.fw_size) 1819 return false; 1820 1821 ucode = &adev->firmware.ucode[ucode_type]; 1822 if (!ucode->fw || !ucode->ucode_size) 1823 return false; 1824 1825 return psp_compare_sram_data(&adev->psp, ucode, ucode_type); 1826 } 1827 1828 static int psp_set_clockgating_state(void *handle, 1829 enum amd_clockgating_state state) 1830 { 1831 return 0; 1832 } 1833 1834 static int psp_set_powergating_state(void *handle, 1835 enum amd_powergating_state state) 1836 { 1837 return 0; 1838 } 1839 1840 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 1841 struct device_attribute *attr, 1842 char *buf) 1843 { 1844 struct drm_device *ddev = 
dev_get_drvdata(dev); 1845 struct amdgpu_device *adev = ddev->dev_private; 1846 uint32_t fw_ver; 1847 int ret; 1848 1849 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 1850 DRM_INFO("PSP block is not ready yet."); 1851 return -EBUSY; 1852 } 1853 1854 mutex_lock(&adev->psp.mutex); 1855 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 1856 mutex_unlock(&adev->psp.mutex); 1857 1858 if (ret) { 1859 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 1860 return ret; 1861 } 1862 1863 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 1864 } 1865 1866 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 1867 struct device_attribute *attr, 1868 const char *buf, 1869 size_t count) 1870 { 1871 struct drm_device *ddev = dev_get_drvdata(dev); 1872 struct amdgpu_device *adev = ddev->dev_private; 1873 void *cpu_addr; 1874 dma_addr_t dma_addr; 1875 int ret; 1876 char fw_name[100]; 1877 const struct firmware *usbc_pd_fw; 1878 1879 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 1880 DRM_INFO("PSP block is not ready yet."); 1881 return -EBUSY; 1882 } 1883 1884 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 1885 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 1886 if (ret) 1887 goto fail; 1888 1889 /* We need contiguous physical mem to place the FW for psp to access */ 1890 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 1891 1892 ret = dma_mapping_error(adev->dev, dma_addr); 1893 if (ret) 1894 goto rel_buf; 1895 1896 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 1897 1898 /* 1899 * x86 specific workaround. 1900 * Without it the buffer is invisible in PSP. 
1901 * 1902 * TODO Remove once PSP starts snooping CPU cache 1903 */ 1904 #ifdef CONFIG_X86 1905 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 1906 #endif 1907 1908 mutex_lock(&adev->psp.mutex); 1909 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 1910 mutex_unlock(&adev->psp.mutex); 1911 1912 rel_buf: 1913 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 1914 release_firmware(usbc_pd_fw); 1915 1916 fail: 1917 if (ret) { 1918 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 1919 return ret; 1920 } 1921 1922 return count; 1923 } 1924 1925 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 1926 psp_usbc_pd_fw_sysfs_read, 1927 psp_usbc_pd_fw_sysfs_write); 1928 1929 1930 1931 const struct amd_ip_funcs psp_ip_funcs = { 1932 .name = "psp", 1933 .early_init = psp_early_init, 1934 .late_init = NULL, 1935 .sw_init = psp_sw_init, 1936 .sw_fini = psp_sw_fini, 1937 .hw_init = psp_hw_init, 1938 .hw_fini = psp_hw_fini, 1939 .suspend = psp_suspend, 1940 .resume = psp_resume, 1941 .is_idle = NULL, 1942 .check_soft_reset = NULL, 1943 .wait_for_idle = NULL, 1944 .soft_reset = NULL, 1945 .set_clockgating_state = psp_set_clockgating_state, 1946 .set_powergating_state = psp_set_powergating_state, 1947 }; 1948 1949 static int psp_sysfs_init(struct amdgpu_device *adev) 1950 { 1951 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 1952 1953 if (ret) 1954 DRM_ERROR("Failed to create USBC PD FW control file!"); 1955 1956 return ret; 1957 } 1958 1959 static void psp_sysfs_fini(struct amdgpu_device *adev) 1960 { 1961 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 1962 } 1963 1964 static const struct amdgpu_psp_funcs psp_funcs = { 1965 .check_fw_loading_status = psp_check_fw_loading_status, 1966 }; 1967 1968 static void psp_set_funcs(struct amdgpu_device *adev) 1969 { 1970 if (NULL == adev->firmware.funcs) 1971 adev->firmware.funcs = &psp_funcs; 1972 } 1973 1974 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 
1975 { 1976 .type = AMD_IP_BLOCK_TYPE_PSP, 1977 .major = 3, 1978 .minor = 1, 1979 .rev = 0, 1980 .funcs = &psp_ip_funcs, 1981 }; 1982 1983 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 1984 { 1985 .type = AMD_IP_BLOCK_TYPE_PSP, 1986 .major = 10, 1987 .minor = 0, 1988 .rev = 0, 1989 .funcs = &psp_ip_funcs, 1990 }; 1991 1992 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 1993 { 1994 .type = AMD_IP_BLOCK_TYPE_PSP, 1995 .major = 11, 1996 .minor = 0, 1997 .rev = 0, 1998 .funcs = &psp_ip_funcs, 1999 }; 2000 2001 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 2002 { 2003 .type = AMD_IP_BLOCK_TYPE_PSP, 2004 .major = 12, 2005 .minor = 0, 2006 .rev = 0, 2007 .funcs = &psp_ip_funcs, 2008 }; 2009