/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <linux/dma-mapping.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"

#include "amdgpu_ras.h"

static void psp_set_funcs(struct amdgpu_device *adev);

static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);

/*
 * Due to DF Cstate management centralized to PMFW, the firmware
 * loading sequence will be updated as below:
 * - Load KDB
 * - Load SYS_DRV
 * - Load tOS
 * - Load PMFW
 * - Setup TMR
 * - Load other non-psp fw
 * - Load ASD
 * - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 * - Arcturus
 * - Navi12 and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	psp->pmfw_centralized_cstate_management = false;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if ((adev->asic_type == CHIP_ARCTURUS) ||
	    (adev->asic_type >= CHIP_NAVI12))
		psp->pmfw_centralized_cstate_management = true;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	psp_set_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_RAVEN:
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case CHIP_RENOIR:
		psp_v12_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	return 0;
}

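/*
 * sw_init/sw_fini: load the PSP microcode images from /lib/firmware, run the
 * cold-boot memory training step, and (on Navi10 only) create the USBC PD
 * firmware sysfs file.
 */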
static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	ret = psp_init_microcode(psp);
	if (ret) {
		DRM_ERROR("Failed to load psp firmware!\n");
		return ret;
	}

	ret = psp_mem_training_init(psp);
	if (ret) {
		DRM_ERROR("Failed to initialize memory training!\n");
		return ret;
	}
	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_NAVI10) {
		ret = psp_sysfs_init(adev);
		if (ret)
			return ret;
	}

	return 0;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	psp_mem_training_fini(&adev->psp);
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	if (adev->psp.ta_fw) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
	}

	if (adev->asic_type == CHIP_NAVI10)
		psp_sysfs_fini(adev);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}

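/*
 * Copy a GFX command into the PSP command buffer, submit it on the KM ring
 * and poll the fence buffer (up to ~2 seconds) until PSP writes back the
 * expected fence value.  Bails out early when a RAS controller interrupt has
 * been raised so the GPU reset thread is not blocked on the PSP mutex.
 */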
static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 2000;
	bool ras_intr = false;

	mutex_lock(&psp->mutex);

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		mutex_unlock(&psp->mutex);
		return ret;
	}

	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because the gpu reset thread has been triggered and the lock
		 * resource should be released for the psp resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		msleep(1);
		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	}

	/* In some cases, the psp response status is not 0 even if there is no
	 * problem while the command is submitted. Some versions of PSP FW
	 * don't write 0 to that field.
	 * So here we only print a warning instead of an error during psp
	 * initialization to avoid breaking hw_init, and we don't return
	 * -EINVAL.
	 */
	if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			DRM_WARN("failed to load ucode id (%d) ",
				 ucode->ucode_id);
		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
			 psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		if (!timeout) {
			mutex_unlock(&psp->mutex);
			return -EINVAL;
		}
	}

	/* get xGMI session id from response buffer */
	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}
	mutex_unlock(&psp->mutex);

	return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, uint32_t size)
{
	if (psp_support_vmr_ring(psp))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue the LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* Copy toc to psp firmware private buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
	kfree(cmd);
	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", e.g. the start address is an integer multiple
	 * of the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE;

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc_start_addr &&
	    psp->toc_bin_size &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			DRM_ERROR("Failed to load toc\n");
			return ret;
		}
	}

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);

	return ret;
}

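/*
 * Program the reserved TMR BO into PSP via the SETUP_TMR command
 * (SETUP_VMR when the VMR ring is in use).
 */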
static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
			     amdgpu_bo_size(psp->tmr_bo));
	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t asd_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_len = size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}

static int psp_asd_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* If the PSP version doesn't match the ASD version, ASD loading
	 * will fail. Add a workaround to bypass it for SRIOV for now.
	 * TODO: add a version check to make it common.
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
				  psp->asd_ucode_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret) {
		psp->asd_context.asd_initialized = true;
		psp->asd_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

static int psp_asd_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.asd_initialized)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		psp->asd_context.asd_initialized = false;

	kfree(cmd);

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd = NULL;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);
	return ret;
}

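/*
 * Helpers shared by the trusted applications (XGMI, RAS, HDCP, DTM): build a
 * LOAD_TA command pointing at the TA binary staged in the firmware private
 * buffer plus the per-TA shared buffer used to exchange commands with the
 * driver.
 */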
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     uint32_t ta_bin_size,
				     uint64_t ta_shared_mc,
				     uint32_t ta_shared_size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = ta_bin_size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}

static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for xgmi ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->xgmi_context.xgmi_shared_bo,
				      &psp->xgmi_context.xgmi_shared_mc_addr,
				      &psp->xgmi_context.xgmi_shared_buf);

	return ret;
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

int psp_ta_invoke(struct psp_context *psp,
		  uint32_t ta_cmd_id,
		  uint32_t session_id)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static int psp_xgmi_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_xgmi_ucode_size,
				 psp->xgmi_context.xgmi_shared_mc_addr,
				 PSP_XGMI_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->xgmi_context.initialized = 1;
		psp->xgmi_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_xgmi_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus */
	if (adev->asic_type == CHIP_ARCTURUS)
		return 0;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}

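/*
 * Tear down the XGMI TA: unload it from PSP (a no-op on Arcturus, where TA
 * unload is not supported) and free the shared buffer.
 */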
int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->xgmi_context.initialized)
		return 0;

	ret = psp_xgmi_unload(psp);
	if (ret)
		return ret;

	psp->xgmi_context.initialized = 0;

	/* free xgmi shared memory */
	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
			      &psp->xgmi_context.xgmi_shared_mc_addr,
			      &psp->xgmi_context.xgmi_shared_buf);

	return 0;
}

int psp_xgmi_initialize(struct psp_context *psp)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->adev->psp.ta_fw ||
	    !psp->adev->psp.ta_xgmi_ucode_size ||
	    !psp->adev->psp.ta_xgmi_start_addr)
		return -ENOENT;

	if (!psp->xgmi_context.initialized) {
		ret = psp_xgmi_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_xgmi_load(psp);
	if (ret)
		return ret;

	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);

	return ret;
}

// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ras ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->ras.ras_shared_bo,
				      &psp->ras.ras_shared_mc_addr,
				      &psp->ras.ras_shared_buf);

	return ret;
}

static int psp_ras_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_ras_ucode_size,
				 psp->ras.ras_shared_mc_addr,
				 PSP_RAS_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->ras.ras_initialized = true;
		psp->ras.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_ras_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
}

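/*
 * Enable or disable RAS features through the RAS TA.  The request is written
 * into the RAS shared buffer and the TA's ras_status is returned on success.
 */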
int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
}

static int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras.ras_initialized)
		return 0;

	ret = psp_ras_unload(psp);
	if (ret)
		return ret;

	psp->ras.ras_initialized = false;

	/* free ras shared memory */
	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
			      &psp->ras.ras_shared_mc_addr,
			      &psp->ras.ras_shared_buf);

	return 0;
}

static int psp_ras_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_ras_ucode_size ||
	    !psp->adev->psp.ta_ras_start_addr) {
		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (!psp->ras.ras_initialized) {
		ret = psp_ras_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_ras_load(psp);
	if (ret)
		return ret;

	return 0;
}
// ras end

// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for hdcp ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->hdcp_context.hdcp_shared_bo,
				      &psp->hdcp_context.hdcp_shared_mc_addr,
				      &psp->hdcp_context.hdcp_shared_buf);

	return ret;
}

static int psp_hdcp_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
	       psp->ta_hdcp_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_hdcp_ucode_size,
				 psp->hdcp_context.hdcp_shared_mc_addr,
				 PSP_HDCP_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->hdcp_context.hdcp_initialized = true;
		psp->hdcp_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_hdcp_ucode_size ||
	    !psp->adev->psp.ta_hdcp_start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	if (!psp->hdcp_context.hdcp_initialized) {
		ret = psp_hdcp_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_hdcp_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_hdcp_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}

static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.hdcp_initialized)
		return 0;

	ret = psp_hdcp_unload(psp);
	if (ret)
		return ret;

	psp->hdcp_context.hdcp_initialized = false;

	/* free hdcp shared memory */
	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
			      &psp->hdcp_context.hdcp_shared_mc_addr,
			      &psp->hdcp_context.hdcp_shared_buf);

	return 0;
}
// HDCP end

// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for dtm ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->dtm_context.dtm_shared_bo,
				      &psp->dtm_context.dtm_shared_mc_addr,
				      &psp->dtm_context.dtm_shared_buf);

	return ret;
}

static int psp_dtm_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_dtm_ucode_size,
				 psp->dtm_context.dtm_shared_mc_addr,
				 PSP_DTM_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->dtm_context.dtm_initialized = true;
		psp->dtm_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_dtm_ucode_size ||
	    !psp->adev->psp.ta_dtm_start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	if (!psp->dtm_context.dtm_initialized) {
		ret = psp_dtm_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_dtm_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_dtm_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}

static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.dtm_initialized)
		return 0;

	ret = psp_dtm_unload(psp);
	if (ret)
		return ret;

	psp->dtm_context.dtm_initialized = false;

	/* free dtm shared memory */
	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
			      &psp->dtm_context.dtm_shared_mc_addr,
			      &psp->dtm_context.dtm_shared_buf);

	return 0;
}
// DTM end

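/*
 * Bring the PSP up: on bare metal, run the bootloader stages (KDB where
 * available, then SYS_DRV, then sOS), create the KM ring and reserve the TMR.
 * Actual TMR programming is deferred to psp_np_fw_load() on ASICs where DF
 * Cstate management is centralized to PMFW.
 */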
static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (psp->kdb_bin_size &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				DRM_ERROR("PSP load kdb failed!\n");
				return ret;
			}
		}

		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_tmr_init(psp);
	if (ret) {
		DRM_ERROR("PSP tmr init failed!\n");
		return ret;
	}

	/*
	 * For those ASICs with DF Cstate management centralized
	 * to PMFW, TMR setup should be performed after PMFW
	 * loaded and before other non-psp firmware loaded.
	 */
	if (!psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

	return 0;
}

static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
			   enum psp_gfx_fw_type *type)
{
	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
		*type = GFX_FW_TYPE_SDMA0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		*type = GFX_FW_TYPE_SDMA1;
		break;
	case AMDGPU_UCODE_ID_SDMA2:
		*type = GFX_FW_TYPE_SDMA2;
		break;
	case AMDGPU_UCODE_ID_SDMA3:
		*type = GFX_FW_TYPE_SDMA3;
		break;
	case AMDGPU_UCODE_ID_SDMA4:
		*type = GFX_FW_TYPE_SDMA4;
		break;
	case AMDGPU_UCODE_ID_SDMA5:
		*type = GFX_FW_TYPE_SDMA5;
		break;
	case AMDGPU_UCODE_ID_SDMA6:
		*type = GFX_FW_TYPE_SDMA6;
		break;
	case AMDGPU_UCODE_ID_SDMA7:
		*type = GFX_FW_TYPE_SDMA7;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		*type = GFX_FW_TYPE_CP_CE;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		*type = GFX_FW_TYPE_CP_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		*type = GFX_FW_TYPE_CP_ME;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME1;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME2;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		*type = GFX_FW_TYPE_RLC_G;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
	case AMDGPU_UCODE_ID_UVD:
		*type = GFX_FW_TYPE_UVD;
		break;
	case AMDGPU_UCODE_ID_UVD1:
		*type = GFX_FW_TYPE_UVD1;
		break;
	case AMDGPU_UCODE_ID_VCE:
		*type = GFX_FW_TYPE_VCE;
		break;
	case AMDGPU_UCODE_ID_VCN:
		*type = GFX_FW_TYPE_VCN;
		break;
	case AMDGPU_UCODE_ID_VCN1:
		*type = GFX_FW_TYPE_VCN1;
		break;
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		*type = GFX_FW_TYPE_DMCU_ERAM;
		break;
	case AMDGPU_UCODE_ID_DMCU_INTV:
		*type = GFX_FW_TYPE_DMCU_ISR;
		break;
	case AMDGPU_UCODE_ID_VCN0_RAM:
		*type = GFX_FW_TYPE_VCN0_RAM;
		break;
	case AMDGPU_UCODE_ID_VCN1_RAM:
		*type = GFX_FW_TYPE_VCN1_RAM;
		break;
	case AMDGPU_UCODE_ID_DMCUB:
		*type = GFX_FW_TYPE_DMUB;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		return -EINVAL;
	}

	return 0;
}

static void psp_print_fw_hdr(struct psp_context *psp,
			     struct amdgpu_firmware_info *ucode)
{
	struct amdgpu_device *adev = psp->adev;
	struct common_firmware_header *hdr;

	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
	case AMDGPU_UCODE_ID_SDMA1:
	case AMDGPU_UCODE_ID_SDMA2:
	case AMDGPU_UCODE_ID_SDMA3:
	case AMDGPU_UCODE_ID_SDMA4:
	case AMDGPU_UCODE_ID_SDMA5:
	case AMDGPU_UCODE_ID_SDMA6:
	case AMDGPU_UCODE_ID_SDMA7:
		hdr = (struct common_firmware_header *)
			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
		amdgpu_ucode_print_sdma_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
		amdgpu_ucode_print_rlc_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_SMC:
		hdr = (struct common_firmware_header *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(hdr);
		break;
	default:
		break;
	}
}

static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}

static int psp_execute_np_fw_load(struct psp_context *psp,
				  struct amdgpu_firmware_info *ucode)
{
	int ret = 0;

	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
	if (ret)
		return ret;

	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
				 psp->fence_buf_mc_addr);

	return ret;
}

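/*
 * Load all non-PSP firmware images through PSP.  The SMC firmware is loaded
 * first (and the TMR is programmed right after it on ASICs with
 * PMFW-centralized DF Cstate management); images handled by the SR-IOV host
 * or covered by RLC autoload are skipped.
 */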
static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	if (psp->autoload_supported ||
	    psp->pmfw_centralized_cstate_management) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		if (!ucode->fw || amdgpu_sriov_vf(adev))
			goto out;

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;
	}

	if (psp->pmfw_centralized_cstate_management) {
		ret = psp_tmr_load(psp);
		if (ret) {
			DRM_ERROR("PSP load tmr failed!\n");
			return ret;
		}
	}

out:
	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (!ucode->fw)
			continue;

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    (psp_smu_reload_quirk(psp) ||
		     psp->autoload_supported ||
		     psp->pmfw_centralized_cstate_management))
			continue;

		if (amdgpu_sriov_vf(adev) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		     || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
			/* skip ucode loading in SRIOV VF */
			continue;

		if (psp->autoload_supported &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
			/* skip mec JT when autoload is enabled */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
#if 0
		/* check if firmware loaded successfully */
		if (!amdgpu_psp_check_fw_loading_status(adev, i))
			return -EINVAL;
#endif
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	return 0;

failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini.
	 */
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only used once on hw_init; it is not needed
	 * on resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	void *tmr_buf;
	void **pptr;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}

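/*
 * Resume path: redo memory training, restart the PSP (ring + TMR), reload all
 * non-PSP firmware and the ASD, then re-initialize the XGMI/RAS/HDCP/DTM TAs.
 */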
static int psp_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	DRM_INFO("PSP is resuming...\n");

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	mutex_lock(&adev->firmware.mutex);

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warn about an XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	mutex_unlock(&adev->firmware.mutex);

	return 0;

failed:
	DRM_ERROR("PSP resume failed\n");
	mutex_unlock(&adev->firmware.mutex);
	return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
	int ret;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	mutex_lock(&adev->psp.mutex);
	ret = psp_mode1_reset(&adev->psp);
	mutex_unlock(&adev->psp.mutex);

	return ret;
}

int psp_rlc_autoload_start(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	kfree(cmd);
	return ret;
}

int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
			uint64_t cmd_gpu_addr, int cmd_size)
{
	struct amdgpu_firmware_info ucode = {0};

	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
		AMDGPU_UCODE_ID_VCN0_RAM;
	ucode.mc_addr = cmd_gpu_addr;
	ucode.ucode_size = cmd_size;

	return psp_execute_np_fw_load(&adev->psp, &ucode);
}

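/*
 * Write a single GPCOM frame (command buffer address, fence address and fence
 * value) into the KM ring at the current write pointer, flush HDP, and
 * advance the write pointer in DWORDs.
 */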
int psp_ring_cmd_submit(struct psp_context *psp,
			uint64_t cmd_buf_mc_addr,
			uint64_t fence_mc_addr,
			int index)
{
	unsigned int psp_write_ptr_reg = 0;
	struct psp_gfx_rb_frame *write_frame;
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
	struct amdgpu_device *adev = psp->adev;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

	/* KM (GPCOM) prepare write pointer */
	psp_write_ptr_reg = psp_ring_get_wptr(psp);

	/* Update KM RB frame pointer to new frame */
	/* write_frame ptr increments by size of rb_frame in bytes */
	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
	if ((psp_write_ptr_reg % ring_size_dw) == 0)
		write_frame = ring_buffer_start;
	else
		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
	/* Check invalid write_frame ptr address */
	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
			  ring_buffer_start, ring_buffer_end, write_frame);
		DRM_ERROR("write_frame is pointing to address out of bounds\n");
		return -EINVAL;
	}

	/* Initialize KM RB frame */
	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));

	/* Update KM RB frame */
	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	write_frame->fence_value = index;
	amdgpu_asic_flush_hdp(adev, NULL);

	/* Update the write pointer in DWORDs */
	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
	psp_ring_set_wptr(psp, psp_write_ptr_reg);
	return 0;
}

static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
					enum AMDGPU_UCODE_ID ucode_type)
{
	struct amdgpu_firmware_info *ucode = NULL;

	if (!adev->firmware.fw_size)
		return false;

	ucode = &adev->firmware.ucode[ucode_type];
	if (!ucode->fw || !ucode->ucode_size)
		return false;

	return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
}

static int psp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t fw_ver;
	int ret;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
		return ret;
	}

	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
}

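/*
 * sysfs write handler: request the named firmware image, stage it in a
 * DMA-coherent buffer the PSP can reach, and ask PSP to flash the USBC PD
 * firmware.
 */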
1897 * 1898 * TODO Remove once PSP starts snooping CPU cache 1899 */ 1900 #ifdef CONFIG_X86 1901 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 1902 #endif 1903 1904 mutex_lock(&adev->psp.mutex); 1905 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 1906 mutex_unlock(&adev->psp.mutex); 1907 1908 rel_buf: 1909 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 1910 release_firmware(usbc_pd_fw); 1911 1912 fail: 1913 if (ret) { 1914 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 1915 return ret; 1916 } 1917 1918 return count; 1919 } 1920 1921 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 1922 psp_usbc_pd_fw_sysfs_read, 1923 psp_usbc_pd_fw_sysfs_write); 1924 1925 1926 1927 const struct amd_ip_funcs psp_ip_funcs = { 1928 .name = "psp", 1929 .early_init = psp_early_init, 1930 .late_init = NULL, 1931 .sw_init = psp_sw_init, 1932 .sw_fini = psp_sw_fini, 1933 .hw_init = psp_hw_init, 1934 .hw_fini = psp_hw_fini, 1935 .suspend = psp_suspend, 1936 .resume = psp_resume, 1937 .is_idle = NULL, 1938 .check_soft_reset = NULL, 1939 .wait_for_idle = NULL, 1940 .soft_reset = NULL, 1941 .set_clockgating_state = psp_set_clockgating_state, 1942 .set_powergating_state = psp_set_powergating_state, 1943 }; 1944 1945 static int psp_sysfs_init(struct amdgpu_device *adev) 1946 { 1947 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 1948 1949 if (ret) 1950 DRM_ERROR("Failed to create USBC PD FW control file!"); 1951 1952 return ret; 1953 } 1954 1955 static void psp_sysfs_fini(struct amdgpu_device *adev) 1956 { 1957 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 1958 } 1959 1960 static const struct amdgpu_psp_funcs psp_funcs = { 1961 .check_fw_loading_status = psp_check_fw_loading_status, 1962 }; 1963 1964 static void psp_set_funcs(struct amdgpu_device *adev) 1965 { 1966 if (NULL == adev->firmware.funcs) 1967 adev->firmware.funcs = &psp_funcs; 1968 } 1969 1970 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 1971 { 1972 .type = AMD_IP_BLOCK_TYPE_PSP, 1973 .major = 3, 1974 .minor = 1, 1975 .rev = 0, 1976 .funcs = &psp_ip_funcs, 1977 }; 1978 1979 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 1980 { 1981 .type = AMD_IP_BLOCK_TYPE_PSP, 1982 .major = 10, 1983 .minor = 0, 1984 .rev = 0, 1985 .funcs = &psp_ip_funcs, 1986 }; 1987 1988 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 1989 { 1990 .type = AMD_IP_BLOCK_TYPE_PSP, 1991 .major = 11, 1992 .minor = 0, 1993 .rev = 0, 1994 .funcs = &psp_ip_funcs, 1995 }; 1996 1997 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 1998 { 1999 .type = AMD_IP_BLOCK_TYPE_PSP, 2000 .major = 12, 2001 .minor = 0, 2002 .rev = 0, 2003 .funcs = &psp_ip_funcs, 2004 }; 2005