1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <linux/dma-mapping.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "soc15_common.h" 33 #include "psp_v3_1.h" 34 #include "psp_v10_0.h" 35 #include "psp_v11_0.h" 36 #include "psp_v12_0.h" 37 38 #include "amdgpu_ras.h" 39 40 static void psp_set_funcs(struct amdgpu_device *adev); 41 42 static int psp_sysfs_init(struct amdgpu_device *adev); 43 static void psp_sysfs_fini(struct amdgpu_device *adev); 44 45 /* 46 * Due to DF Cstate management centralized to PMFW, the firmware 47 * loading sequence will be updated as below: 48 * - Load KDB 49 * - Load SYS_DRV 50 * - Load tOS 51 * - Load PMFW 52 * - Setup TMR 53 * - Load other non-psp fw 54 * - Load ASD 55 * - Load XGMI/RAS/HDCP/DTM TA if any 56 * 57 * This new sequence is required for 58 * - Arcturus 59 * - Navi12 and onwards 60 */ 61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 62 { 63 struct amdgpu_device *adev = psp->adev; 64 65 psp->pmfw_centralized_cstate_management = false; 66 67 if (amdgpu_sriov_vf(adev)) 68 return; 69 70 if (adev->flags & AMD_IS_APU) 71 return; 72 73 if ((adev->asic_type == CHIP_ARCTURUS) || 74 (adev->asic_type >= CHIP_NAVI12)) 75 psp->pmfw_centralized_cstate_management = true; 76 } 77 78 static int psp_early_init(void *handle) 79 { 80 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct psp_context *psp = &adev->psp; 82 83 psp_set_funcs(adev); 84 85 switch (adev->asic_type) { 86 case CHIP_VEGA10: 87 case CHIP_VEGA12: 88 psp_v3_1_set_psp_funcs(psp); 89 psp->autoload_supported = false; 90 break; 91 case CHIP_RAVEN: 92 psp_v10_0_set_psp_funcs(psp); 93 psp->autoload_supported = false; 94 break; 95 case CHIP_VEGA20: 96 case CHIP_ARCTURUS: 97 psp_v11_0_set_psp_funcs(psp); 98 psp->autoload_supported = false; 99 break; 100 case CHIP_NAVI10: 101 case CHIP_NAVI14: 102 case CHIP_NAVI12: 103 psp_v11_0_set_psp_funcs(psp); 104 psp->autoload_supported = true; 105 break; 106 case CHIP_RENOIR: 107 psp_v12_0_set_psp_funcs(psp); 108 break; 109 default: 110 return -EINVAL; 111 } 112 113 psp->adev = adev; 114 115 psp_check_pmfw_centralized_cstate_management(psp); 116 117 return 0; 118 } 119 120 static int psp_late_init(void *handle) 121 { 122 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 123 124 if 
(adev->asic_type == CHIP_NAVI10)
		return psp_sysfs_init(adev);

	return 0;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	ret = psp_init_microcode(psp);
	if (ret) {
		DRM_ERROR("Failed to load psp firmware!\n");
		return ret;
	}

	ret = psp_mem_training_init(psp);
	if (ret) {
		DRM_ERROR("Failed to initialize memory training!\n");
		return ret;
	}
	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	return 0;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	psp_mem_training_fini(&adev->psp);
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	if (adev->psp.ta_fw) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
	}

	if (adev->asic_type == CHIP_NAVI10)
		psp_sysfs_fini(adev);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}
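
/*
 * Illustrative usage sketch (not part of the driver): psp_wait_for() is the
 * generic polling helper the psp_v*_0 backends use to wait on PSP mailbox
 * registers. A hypothetical caller polling a status register for a "ready"
 * bit could look roughly like the snippet below; the register name and bit
 * mask are made-up placeholders, not real MP0 definitions.
 *
 *	int ret;
 *
 *	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmFOO_STATUS),
 *			   0x80000000, 0x80000000, false);
 *	if (ret)
 *		DRM_ERROR("PSP did not report ready\n");
 *
 * With check_changed == false the helper succeeds once (reg & mask) equals
 * reg_val; with check_changed == true it succeeds as soon as the register
 * value differs from reg_val.
 */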

static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 2000;

	mutex_lock(&psp->mutex);

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		mutex_unlock(&psp->mutex);
		return ret;
	}

	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Shouldn't wait for timeout when err_event_athub occurs,
		 * because gpu reset thread triggered and lock resource should
		 * be released for psp resume sequence.
		 */
		if (amdgpu_ras_intr_triggered())
			break;
		msleep(1);
		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	}

	/* In some cases, the PSP response status is not 0 even though there
	 * is no problem with the submitted command: some versions of the PSP
	 * firmware simply do not write 0 back to that field.
	 * So during psp initialization we only print a warning instead of an
	 * error, to avoid breaking hw_init, and we do not return -EINVAL
	 * unless the command actually timed out.
	 */
	if (psp->cmd_buf_mem->resp.status || !timeout) {
		if (ucode)
			DRM_WARN("failed to load ucode id (%d) ",
				 ucode->ucode_id);
		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
			 psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		if (!timeout) {
			mutex_unlock(&psp->mutex);
			return -EINVAL;
		}
	}

	/* get xGMI session id from response buffer */
	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}
	mutex_unlock(&psp->mutex);

	return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, uint32_t size)
{
	if (psp_support_vmr_ring(psp))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue a LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* Copy toc to psp firmware private buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
	kfree(cmd);
	return ret;
}

/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, the TMR address should be "naturally
	 * aligned", i.e. the start address should be an integer multiple of
	 * the TMR size.
	 *
	 * Note: this memory needs to be reserved until the driver
	 * uninitializes.
	 */
	tmr_size = PSP_TMR_SIZE;

	/* For ASICs that support RLC autoload, psp will parse the toc
	 * and calculate the total size of TMR needed */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc_start_addr &&
	    psp->toc_bin_size &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			DRM_ERROR("Failed to load toc\n");
			return ret;
		}
	}

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);

	return ret;
}
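
/*
 * A minimal sketch of what "naturally aligned" above means, assuming the
 * TMR size is a power of two (PSP_TMR_SIZE is). This helper is illustrative
 * only and is not used anywhere in the driver:
 *
 *	static bool psp_tmr_is_naturally_aligned(uint64_t tmr_mc_addr,
 *						 uint64_t tmr_size)
 *	{
 *		return (tmr_mc_addr & (tmr_size - 1)) == 0;
 *	}
 *
 * psp_tmr_init() passes PSP_TMR_SIZE as the alignment argument of
 * amdgpu_bo_create_kernel(), which is what guarantees this property for the
 * reserved TMR buffer object.
 */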

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
			     amdgpu_bo_size(psp->tmr_bo));
	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t asd_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_len = size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}

static int psp_asd_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* If the PSP version doesn't match the ASD version, ASD loading will
	 * fail; add a workaround to bypass it for SRIOV for now.
	 * TODO: add a version check to make it common
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
				  psp->asd_ucode_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret) {
		psp->asd_context.asd_initialized = true;
		psp->asd_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

static int psp_asd_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.asd_initialized)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		psp->asd_context.asd_initialized = false;

	kfree(cmd);

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd = NULL;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL,
cmd, psp->fence_buf_mc_addr); 478 479 kfree(cmd); 480 return ret; 481 } 482 483 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 484 uint64_t ta_bin_mc, 485 uint32_t ta_bin_size, 486 uint64_t ta_shared_mc, 487 uint32_t ta_shared_size) 488 { 489 cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 490 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 491 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 492 cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 493 494 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 495 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 496 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 497 } 498 499 static int psp_xgmi_init_shared_buf(struct psp_context *psp) 500 { 501 int ret; 502 503 /* 504 * Allocate 16k memory aligned to 4k from Frame Buffer (local 505 * physical) for xgmi ta <-> Driver 506 */ 507 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, 508 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 509 &psp->xgmi_context.xgmi_shared_bo, 510 &psp->xgmi_context.xgmi_shared_mc_addr, 511 &psp->xgmi_context.xgmi_shared_buf); 512 513 return ret; 514 } 515 516 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 517 uint32_t ta_cmd_id, 518 uint32_t session_id) 519 { 520 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 521 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 522 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 523 } 524 525 int psp_ta_invoke(struct psp_context *psp, 526 uint32_t ta_cmd_id, 527 uint32_t session_id) 528 { 529 int ret; 530 struct psp_gfx_cmd_resp *cmd; 531 532 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 533 if (!cmd) 534 return -ENOMEM; 535 536 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 537 538 ret = psp_cmd_submit_buf(psp, NULL, cmd, 539 psp->fence_buf_mc_addr); 540 541 kfree(cmd); 542 543 return ret; 544 } 545 546 static int psp_xgmi_load(struct psp_context *psp) 547 { 548 int ret; 549 struct psp_gfx_cmd_resp *cmd; 550 551 /* 552 * TODO: bypass the loading in sriov for now 553 */ 554 555 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 556 if (!cmd) 557 return -ENOMEM; 558 559 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 560 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 561 562 psp_prep_ta_load_cmd_buf(cmd, 563 psp->fw_pri_mc_addr, 564 psp->ta_xgmi_ucode_size, 565 psp->xgmi_context.xgmi_shared_mc_addr, 566 PSP_XGMI_SHARED_MEM_SIZE); 567 568 ret = psp_cmd_submit_buf(psp, NULL, cmd, 569 psp->fence_buf_mc_addr); 570 571 if (!ret) { 572 psp->xgmi_context.initialized = 1; 573 psp->xgmi_context.session_id = cmd->resp.session_id; 574 } 575 576 kfree(cmd); 577 578 return ret; 579 } 580 581 static int psp_xgmi_unload(struct psp_context *psp) 582 { 583 int ret; 584 struct psp_gfx_cmd_resp *cmd; 585 struct amdgpu_device *adev = psp->adev; 586 587 /* XGMI TA unload currently is not supported on Arcturus */ 588 if (adev->asic_type == CHIP_ARCTURUS) 589 return 0; 590 591 /* 592 * TODO: bypass the unloading in sriov for now 593 */ 594 595 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 596 if (!cmd) 597 return -ENOMEM; 598 599 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 600 601 ret = psp_cmd_submit_buf(psp, NULL, cmd, 602 psp->fence_buf_mc_addr); 603 604 kfree(cmd); 605 606 return ret; 607 } 608 609 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 610 { 611 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 612 } 613 614 int psp_xgmi_terminate(struct 
psp_context *psp) 615 { 616 int ret; 617 618 if (!psp->xgmi_context.initialized) 619 return 0; 620 621 ret = psp_xgmi_unload(psp); 622 if (ret) 623 return ret; 624 625 psp->xgmi_context.initialized = 0; 626 627 /* free xgmi shared memory */ 628 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, 629 &psp->xgmi_context.xgmi_shared_mc_addr, 630 &psp->xgmi_context.xgmi_shared_buf); 631 632 return 0; 633 } 634 635 int psp_xgmi_initialize(struct psp_context *psp) 636 { 637 struct ta_xgmi_shared_memory *xgmi_cmd; 638 int ret; 639 640 if (!psp->adev->psp.ta_fw || 641 !psp->adev->psp.ta_xgmi_ucode_size || 642 !psp->adev->psp.ta_xgmi_start_addr) 643 return -ENOENT; 644 645 if (!psp->xgmi_context.initialized) { 646 ret = psp_xgmi_init_shared_buf(psp); 647 if (ret) 648 return ret; 649 } 650 651 /* Load XGMI TA */ 652 ret = psp_xgmi_load(psp); 653 if (ret) 654 return ret; 655 656 /* Initialize XGMI session */ 657 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); 658 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 659 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 660 661 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 662 663 return ret; 664 } 665 666 // ras begin 667 static int psp_ras_init_shared_buf(struct psp_context *psp) 668 { 669 int ret; 670 671 /* 672 * Allocate 16k memory aligned to 4k from Frame Buffer (local 673 * physical) for ras ta <-> Driver 674 */ 675 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, 676 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 677 &psp->ras.ras_shared_bo, 678 &psp->ras.ras_shared_mc_addr, 679 &psp->ras.ras_shared_buf); 680 681 return ret; 682 } 683 684 static int psp_ras_load(struct psp_context *psp) 685 { 686 int ret; 687 struct psp_gfx_cmd_resp *cmd; 688 689 /* 690 * TODO: bypass the loading in sriov for now 691 */ 692 if (amdgpu_sriov_vf(psp->adev)) 693 return 0; 694 695 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 696 if (!cmd) 697 return -ENOMEM; 698 699 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 700 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 701 702 psp_prep_ta_load_cmd_buf(cmd, 703 psp->fw_pri_mc_addr, 704 psp->ta_ras_ucode_size, 705 psp->ras.ras_shared_mc_addr, 706 PSP_RAS_SHARED_MEM_SIZE); 707 708 ret = psp_cmd_submit_buf(psp, NULL, cmd, 709 psp->fence_buf_mc_addr); 710 711 if (!ret) { 712 psp->ras.ras_initialized = true; 713 psp->ras.session_id = cmd->resp.session_id; 714 } 715 716 kfree(cmd); 717 718 return ret; 719 } 720 721 static int psp_ras_unload(struct psp_context *psp) 722 { 723 int ret; 724 struct psp_gfx_cmd_resp *cmd; 725 726 /* 727 * TODO: bypass the unloading in sriov for now 728 */ 729 if (amdgpu_sriov_vf(psp->adev)) 730 return 0; 731 732 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 733 if (!cmd) 734 return -ENOMEM; 735 736 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 737 738 ret = psp_cmd_submit_buf(psp, NULL, cmd, 739 psp->fence_buf_mc_addr); 740 741 kfree(cmd); 742 743 return ret; 744 } 745 746 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 747 { 748 /* 749 * TODO: bypass the loading in sriov for now 750 */ 751 if (amdgpu_sriov_vf(psp->adev)) 752 return 0; 753 754 return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 755 } 756 757 int psp_ras_enable_features(struct psp_context *psp, 758 union ta_ras_cmd_input *info, bool enable) 759 { 760 struct ta_ras_shared_memory *ras_cmd; 761 int ret; 762 763 if (!psp->ras.ras_initialized) 764 return -EINVAL; 765 766 ras_cmd = (struct ta_ras_shared_memory 
*)psp->ras.ras_shared_buf; 767 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 768 769 if (enable) 770 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 771 else 772 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 773 774 ras_cmd->ras_in_message = *info; 775 776 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 777 if (ret) 778 return -EINVAL; 779 780 return ras_cmd->ras_status; 781 } 782 783 static int psp_ras_terminate(struct psp_context *psp) 784 { 785 int ret; 786 787 /* 788 * TODO: bypass the terminate in sriov for now 789 */ 790 if (amdgpu_sriov_vf(psp->adev)) 791 return 0; 792 793 if (!psp->ras.ras_initialized) 794 return 0; 795 796 ret = psp_ras_unload(psp); 797 if (ret) 798 return ret; 799 800 psp->ras.ras_initialized = false; 801 802 /* free ras shared memory */ 803 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, 804 &psp->ras.ras_shared_mc_addr, 805 &psp->ras.ras_shared_buf); 806 807 return 0; 808 } 809 810 static int psp_ras_initialize(struct psp_context *psp) 811 { 812 int ret; 813 814 /* 815 * TODO: bypass the initialize in sriov for now 816 */ 817 if (amdgpu_sriov_vf(psp->adev)) 818 return 0; 819 820 if (!psp->adev->psp.ta_ras_ucode_size || 821 !psp->adev->psp.ta_ras_start_addr) { 822 dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n"); 823 return 0; 824 } 825 826 if (!psp->ras.ras_initialized) { 827 ret = psp_ras_init_shared_buf(psp); 828 if (ret) 829 return ret; 830 } 831 832 ret = psp_ras_load(psp); 833 if (ret) 834 return ret; 835 836 return 0; 837 } 838 // ras end 839 840 // HDCP start 841 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 842 { 843 int ret; 844 845 /* 846 * Allocate 16k memory aligned to 4k from Frame Buffer (local 847 * physical) for hdcp ta <-> Driver 848 */ 849 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, 850 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 851 &psp->hdcp_context.hdcp_shared_bo, 852 &psp->hdcp_context.hdcp_shared_mc_addr, 853 &psp->hdcp_context.hdcp_shared_buf); 854 855 return ret; 856 } 857 858 static int psp_hdcp_load(struct psp_context *psp) 859 { 860 int ret; 861 struct psp_gfx_cmd_resp *cmd; 862 863 /* 864 * TODO: bypass the loading in sriov for now 865 */ 866 if (amdgpu_sriov_vf(psp->adev)) 867 return 0; 868 869 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 870 if (!cmd) 871 return -ENOMEM; 872 873 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 874 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 875 psp->ta_hdcp_ucode_size); 876 877 psp_prep_ta_load_cmd_buf(cmd, 878 psp->fw_pri_mc_addr, 879 psp->ta_hdcp_ucode_size, 880 psp->hdcp_context.hdcp_shared_mc_addr, 881 PSP_HDCP_SHARED_MEM_SIZE); 882 883 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 884 885 if (!ret) { 886 psp->hdcp_context.hdcp_initialized = true; 887 psp->hdcp_context.session_id = cmd->resp.session_id; 888 } 889 890 kfree(cmd); 891 892 return ret; 893 } 894 static int psp_hdcp_initialize(struct psp_context *psp) 895 { 896 int ret; 897 898 /* 899 * TODO: bypass the initialize in sriov for now 900 */ 901 if (amdgpu_sriov_vf(psp->adev)) 902 return 0; 903 904 if (!psp->adev->psp.ta_hdcp_ucode_size || 905 !psp->adev->psp.ta_hdcp_start_addr) { 906 dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); 907 return 0; 908 } 909 910 if (!psp->hdcp_context.hdcp_initialized) { 911 ret = psp_hdcp_init_shared_buf(psp); 912 if (ret) 913 return ret; 914 } 915 916 ret = psp_hdcp_load(psp); 917 if (ret) 918 return ret; 919 920 return 0; 921 } 922 923 static int psp_hdcp_unload(struct psp_context *psp) 924 { 
925 int ret; 926 struct psp_gfx_cmd_resp *cmd; 927 928 /* 929 * TODO: bypass the unloading in sriov for now 930 */ 931 if (amdgpu_sriov_vf(psp->adev)) 932 return 0; 933 934 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 935 if (!cmd) 936 return -ENOMEM; 937 938 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 939 940 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 941 942 kfree(cmd); 943 944 return ret; 945 } 946 947 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 948 { 949 /* 950 * TODO: bypass the loading in sriov for now 951 */ 952 if (amdgpu_sriov_vf(psp->adev)) 953 return 0; 954 955 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 956 } 957 958 static int psp_hdcp_terminate(struct psp_context *psp) 959 { 960 int ret; 961 962 /* 963 * TODO: bypass the terminate in sriov for now 964 */ 965 if (amdgpu_sriov_vf(psp->adev)) 966 return 0; 967 968 if (!psp->hdcp_context.hdcp_initialized) 969 return 0; 970 971 ret = psp_hdcp_unload(psp); 972 if (ret) 973 return ret; 974 975 psp->hdcp_context.hdcp_initialized = false; 976 977 /* free hdcp shared memory */ 978 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, 979 &psp->hdcp_context.hdcp_shared_mc_addr, 980 &psp->hdcp_context.hdcp_shared_buf); 981 982 return 0; 983 } 984 // HDCP end 985 986 // DTM start 987 static int psp_dtm_init_shared_buf(struct psp_context *psp) 988 { 989 int ret; 990 991 /* 992 * Allocate 16k memory aligned to 4k from Frame Buffer (local 993 * physical) for dtm ta <-> Driver 994 */ 995 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, 996 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 997 &psp->dtm_context.dtm_shared_bo, 998 &psp->dtm_context.dtm_shared_mc_addr, 999 &psp->dtm_context.dtm_shared_buf); 1000 1001 return ret; 1002 } 1003 1004 static int psp_dtm_load(struct psp_context *psp) 1005 { 1006 int ret; 1007 struct psp_gfx_cmd_resp *cmd; 1008 1009 /* 1010 * TODO: bypass the loading in sriov for now 1011 */ 1012 if (amdgpu_sriov_vf(psp->adev)) 1013 return 0; 1014 1015 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1016 if (!cmd) 1017 return -ENOMEM; 1018 1019 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1020 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 1021 1022 psp_prep_ta_load_cmd_buf(cmd, 1023 psp->fw_pri_mc_addr, 1024 psp->ta_dtm_ucode_size, 1025 psp->dtm_context.dtm_shared_mc_addr, 1026 PSP_DTM_SHARED_MEM_SIZE); 1027 1028 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1029 1030 if (!ret) { 1031 psp->dtm_context.dtm_initialized = true; 1032 psp->dtm_context.session_id = cmd->resp.session_id; 1033 } 1034 1035 kfree(cmd); 1036 1037 return ret; 1038 } 1039 1040 static int psp_dtm_initialize(struct psp_context *psp) 1041 { 1042 int ret; 1043 1044 /* 1045 * TODO: bypass the initialize in sriov for now 1046 */ 1047 if (amdgpu_sriov_vf(psp->adev)) 1048 return 0; 1049 1050 if (!psp->adev->psp.ta_dtm_ucode_size || 1051 !psp->adev->psp.ta_dtm_start_addr) { 1052 dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); 1053 return 0; 1054 } 1055 1056 if (!psp->dtm_context.dtm_initialized) { 1057 ret = psp_dtm_init_shared_buf(psp); 1058 if (ret) 1059 return ret; 1060 } 1061 1062 ret = psp_dtm_load(psp); 1063 if (ret) 1064 return ret; 1065 1066 return 0; 1067 } 1068 1069 static int psp_dtm_unload(struct psp_context *psp) 1070 { 1071 int ret; 1072 struct psp_gfx_cmd_resp *cmd; 1073 1074 /* 1075 * TODO: bypass the unloading in sriov for now 1076 */ 1077 if 
(amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}

static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.dtm_initialized)
		return 0;

	ret = psp_dtm_unload(psp);
	if (ret)
		return ret;

	psp->dtm_context.dtm_initialized = false;

	/* free dtm shared memory */
	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
			      &psp->dtm_context.dtm_shared_mc_addr,
			      &psp->dtm_context.dtm_shared_buf);

	return 0;
}
// DTM end

static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (psp->kdb_bin_size &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				DRM_ERROR("PSP load kdb failed!\n");
				return ret;
			}
		}

		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_tmr_init(psp);
	if (ret) {
		DRM_ERROR("PSP tmr init failed!\n");
		return ret;
	}

	/*
	 * For those ASICs with DF Cstate management centralized
	 * to the PMFW, TMR setup should be performed after the PMFW
	 * has been loaded and before any other non-psp firmware is loaded.
1176 */ 1177 if (!psp->pmfw_centralized_cstate_management) { 1178 ret = psp_tmr_load(psp); 1179 if (ret) { 1180 DRM_ERROR("PSP load tmr failed!\n"); 1181 return ret; 1182 } 1183 } 1184 1185 return 0; 1186 } 1187 1188 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 1189 enum psp_gfx_fw_type *type) 1190 { 1191 switch (ucode->ucode_id) { 1192 case AMDGPU_UCODE_ID_SDMA0: 1193 *type = GFX_FW_TYPE_SDMA0; 1194 break; 1195 case AMDGPU_UCODE_ID_SDMA1: 1196 *type = GFX_FW_TYPE_SDMA1; 1197 break; 1198 case AMDGPU_UCODE_ID_SDMA2: 1199 *type = GFX_FW_TYPE_SDMA2; 1200 break; 1201 case AMDGPU_UCODE_ID_SDMA3: 1202 *type = GFX_FW_TYPE_SDMA3; 1203 break; 1204 case AMDGPU_UCODE_ID_SDMA4: 1205 *type = GFX_FW_TYPE_SDMA4; 1206 break; 1207 case AMDGPU_UCODE_ID_SDMA5: 1208 *type = GFX_FW_TYPE_SDMA5; 1209 break; 1210 case AMDGPU_UCODE_ID_SDMA6: 1211 *type = GFX_FW_TYPE_SDMA6; 1212 break; 1213 case AMDGPU_UCODE_ID_SDMA7: 1214 *type = GFX_FW_TYPE_SDMA7; 1215 break; 1216 case AMDGPU_UCODE_ID_CP_CE: 1217 *type = GFX_FW_TYPE_CP_CE; 1218 break; 1219 case AMDGPU_UCODE_ID_CP_PFP: 1220 *type = GFX_FW_TYPE_CP_PFP; 1221 break; 1222 case AMDGPU_UCODE_ID_CP_ME: 1223 *type = GFX_FW_TYPE_CP_ME; 1224 break; 1225 case AMDGPU_UCODE_ID_CP_MEC1: 1226 *type = GFX_FW_TYPE_CP_MEC; 1227 break; 1228 case AMDGPU_UCODE_ID_CP_MEC1_JT: 1229 *type = GFX_FW_TYPE_CP_MEC_ME1; 1230 break; 1231 case AMDGPU_UCODE_ID_CP_MEC2: 1232 *type = GFX_FW_TYPE_CP_MEC; 1233 break; 1234 case AMDGPU_UCODE_ID_CP_MEC2_JT: 1235 *type = GFX_FW_TYPE_CP_MEC_ME2; 1236 break; 1237 case AMDGPU_UCODE_ID_RLC_G: 1238 *type = GFX_FW_TYPE_RLC_G; 1239 break; 1240 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 1241 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 1242 break; 1243 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 1244 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 1245 break; 1246 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 1247 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 1248 break; 1249 case AMDGPU_UCODE_ID_SMC: 1250 *type = GFX_FW_TYPE_SMU; 1251 break; 1252 case AMDGPU_UCODE_ID_UVD: 1253 *type = GFX_FW_TYPE_UVD; 1254 break; 1255 case AMDGPU_UCODE_ID_UVD1: 1256 *type = GFX_FW_TYPE_UVD1; 1257 break; 1258 case AMDGPU_UCODE_ID_VCE: 1259 *type = GFX_FW_TYPE_VCE; 1260 break; 1261 case AMDGPU_UCODE_ID_VCN: 1262 *type = GFX_FW_TYPE_VCN; 1263 break; 1264 case AMDGPU_UCODE_ID_VCN1: 1265 *type = GFX_FW_TYPE_VCN1; 1266 break; 1267 case AMDGPU_UCODE_ID_DMCU_ERAM: 1268 *type = GFX_FW_TYPE_DMCU_ERAM; 1269 break; 1270 case AMDGPU_UCODE_ID_DMCU_INTV: 1271 *type = GFX_FW_TYPE_DMCU_ISR; 1272 break; 1273 case AMDGPU_UCODE_ID_VCN0_RAM: 1274 *type = GFX_FW_TYPE_VCN0_RAM; 1275 break; 1276 case AMDGPU_UCODE_ID_VCN1_RAM: 1277 *type = GFX_FW_TYPE_VCN1_RAM; 1278 break; 1279 case AMDGPU_UCODE_ID_DMCUB: 1280 *type = GFX_FW_TYPE_DMUB; 1281 break; 1282 case AMDGPU_UCODE_ID_MAXIMUM: 1283 default: 1284 return -EINVAL; 1285 } 1286 1287 return 0; 1288 } 1289 1290 static void psp_print_fw_hdr(struct psp_context *psp, 1291 struct amdgpu_firmware_info *ucode) 1292 { 1293 struct amdgpu_device *adev = psp->adev; 1294 struct common_firmware_header *hdr; 1295 1296 switch (ucode->ucode_id) { 1297 case AMDGPU_UCODE_ID_SDMA0: 1298 case AMDGPU_UCODE_ID_SDMA1: 1299 case AMDGPU_UCODE_ID_SDMA2: 1300 case AMDGPU_UCODE_ID_SDMA3: 1301 case AMDGPU_UCODE_ID_SDMA4: 1302 case AMDGPU_UCODE_ID_SDMA5: 1303 case AMDGPU_UCODE_ID_SDMA6: 1304 case AMDGPU_UCODE_ID_SDMA7: 1305 hdr = (struct common_firmware_header *) 1306 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 1307 
amdgpu_ucode_print_sdma_hdr(hdr); 1308 break; 1309 case AMDGPU_UCODE_ID_CP_CE: 1310 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 1311 amdgpu_ucode_print_gfx_hdr(hdr); 1312 break; 1313 case AMDGPU_UCODE_ID_CP_PFP: 1314 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 1315 amdgpu_ucode_print_gfx_hdr(hdr); 1316 break; 1317 case AMDGPU_UCODE_ID_CP_ME: 1318 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 1319 amdgpu_ucode_print_gfx_hdr(hdr); 1320 break; 1321 case AMDGPU_UCODE_ID_CP_MEC1: 1322 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 1323 amdgpu_ucode_print_gfx_hdr(hdr); 1324 break; 1325 case AMDGPU_UCODE_ID_RLC_G: 1326 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 1327 amdgpu_ucode_print_rlc_hdr(hdr); 1328 break; 1329 case AMDGPU_UCODE_ID_SMC: 1330 hdr = (struct common_firmware_header *)adev->pm.fw->data; 1331 amdgpu_ucode_print_smc_hdr(hdr); 1332 break; 1333 default: 1334 break; 1335 } 1336 } 1337 1338 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 1339 struct psp_gfx_cmd_resp *cmd) 1340 { 1341 int ret; 1342 uint64_t fw_mem_mc_addr = ucode->mc_addr; 1343 1344 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 1345 1346 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1347 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 1348 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 1349 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 1350 1351 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 1352 if (ret) 1353 DRM_ERROR("Unknown firmware type\n"); 1354 1355 return ret; 1356 } 1357 1358 static int psp_execute_np_fw_load(struct psp_context *psp, 1359 struct amdgpu_firmware_info *ucode) 1360 { 1361 int ret = 0; 1362 1363 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); 1364 if (ret) 1365 return ret; 1366 1367 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, 1368 psp->fence_buf_mc_addr); 1369 1370 return ret; 1371 } 1372 1373 static int psp_np_fw_load(struct psp_context *psp) 1374 { 1375 int i, ret; 1376 struct amdgpu_firmware_info *ucode; 1377 struct amdgpu_device* adev = psp->adev; 1378 1379 if (psp->autoload_supported || 1380 psp->pmfw_centralized_cstate_management) { 1381 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 1382 if (!ucode->fw || amdgpu_sriov_vf(adev)) 1383 goto out; 1384 1385 ret = psp_execute_np_fw_load(psp, ucode); 1386 if (ret) 1387 return ret; 1388 } 1389 1390 if (psp->pmfw_centralized_cstate_management) { 1391 ret = psp_tmr_load(psp); 1392 if (ret) { 1393 DRM_ERROR("PSP load tmr failed!\n"); 1394 return ret; 1395 } 1396 } 1397 1398 out: 1399 for (i = 0; i < adev->firmware.max_ucodes; i++) { 1400 ucode = &adev->firmware.ucode[i]; 1401 if (!ucode->fw) 1402 continue; 1403 1404 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1405 (psp_smu_reload_quirk(psp) || 1406 psp->autoload_supported || 1407 psp->pmfw_centralized_cstate_management)) 1408 continue; 1409 1410 if (amdgpu_sriov_vf(adev) && 1411 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 1412 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 1413 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 1414 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 1415 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 1416 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 1417 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 1418 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 1419 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G 1420 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL 1421 || ucode->ucode_id == 
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
			/* skip ucode loading in SRIOV VF */
			continue;

		if (psp->autoload_supported &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
			/* skip mec JT when autoload is enabled */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start rlc autoload after psp received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
#if 0
		/* check if firmware loaded successfully */
		if (!amdgpu_psp_check_fw_loading_status(adev, i))
			return -EINVAL;
#endif
	}

	return 0;
}
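
/*
 * Illustrative sketch only: the #if 0 block above shows the intended use of
 * the check_fw_loading_status hook that is wired up at the bottom of this
 * file (psp_check_fw_loading_status() via psp_funcs). A caller wanting to
 * verify a single firmware image after loading could do something like:
 *
 *	if (!amdgpu_psp_check_fw_loading_status(adev, AMDGPU_UCODE_ID_CP_MEC1))
 *		DRM_WARN("MEC1 firmware failed the SRAM check\n");
 *
 * The ucode id here is only an example; the disabled per-ucode check in the
 * loop above would pass the loop index instead.
 */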

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	return 0;

failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is used only once, on hw_init; it is not needed on
	 * resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	void *tmr_buf;
	void **pptr;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);

	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}

static int psp_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	DRM_INFO("PSP is resuming...\n");

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	mutex_lock(&adev->firmware.mutex);

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warn about XGMI session initialization failure
		 * instead of stopping driver initialization
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to
initialize HDCP\n"); 1699 1700 ret = psp_dtm_initialize(psp); 1701 if (ret) 1702 dev_err(psp->adev->dev, 1703 "DTM: Failed to initialize DTM\n"); 1704 } 1705 1706 mutex_unlock(&adev->firmware.mutex); 1707 1708 return 0; 1709 1710 failed: 1711 DRM_ERROR("PSP resume failed\n"); 1712 mutex_unlock(&adev->firmware.mutex); 1713 return ret; 1714 } 1715 1716 int psp_gpu_reset(struct amdgpu_device *adev) 1717 { 1718 int ret; 1719 1720 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 1721 return 0; 1722 1723 mutex_lock(&adev->psp.mutex); 1724 ret = psp_mode1_reset(&adev->psp); 1725 mutex_unlock(&adev->psp.mutex); 1726 1727 return ret; 1728 } 1729 1730 int psp_rlc_autoload_start(struct psp_context *psp) 1731 { 1732 int ret; 1733 struct psp_gfx_cmd_resp *cmd; 1734 1735 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1736 if (!cmd) 1737 return -ENOMEM; 1738 1739 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 1740 1741 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1742 psp->fence_buf_mc_addr); 1743 kfree(cmd); 1744 return ret; 1745 } 1746 1747 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, 1748 uint64_t cmd_gpu_addr, int cmd_size) 1749 { 1750 struct amdgpu_firmware_info ucode = {0}; 1751 1752 ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : 1753 AMDGPU_UCODE_ID_VCN0_RAM; 1754 ucode.mc_addr = cmd_gpu_addr; 1755 ucode.ucode_size = cmd_size; 1756 1757 return psp_execute_np_fw_load(&adev->psp, &ucode); 1758 } 1759 1760 int psp_ring_cmd_submit(struct psp_context *psp, 1761 uint64_t cmd_buf_mc_addr, 1762 uint64_t fence_mc_addr, 1763 int index) 1764 { 1765 unsigned int psp_write_ptr_reg = 0; 1766 struct psp_gfx_rb_frame *write_frame; 1767 struct psp_ring *ring = &psp->km_ring; 1768 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 1769 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 1770 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 1771 struct amdgpu_device *adev = psp->adev; 1772 uint32_t ring_size_dw = ring->ring_size / 4; 1773 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 1774 1775 /* KM (GPCOM) prepare write pointer */ 1776 psp_write_ptr_reg = psp_ring_get_wptr(psp); 1777 1778 /* Update KM RB frame pointer to new frame */ 1779 /* write_frame ptr increments by size of rb_frame in bytes */ 1780 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 1781 if ((psp_write_ptr_reg % ring_size_dw) == 0) 1782 write_frame = ring_buffer_start; 1783 else 1784 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 1785 /* Check invalid write_frame ptr address */ 1786 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 1787 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 1788 ring_buffer_start, ring_buffer_end, write_frame); 1789 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 1790 return -EINVAL; 1791 } 1792 1793 /* Initialize KM RB frame */ 1794 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 1795 1796 /* Update KM RB frame */ 1797 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 1798 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 1799 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 1800 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 1801 write_frame->fence_value = index; 1802 amdgpu_asic_flush_hdp(adev, NULL); 1803 1804 /* Update the write Pointer in DWORDs */ 1805 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 1806 psp_ring_set_wptr(psp, 
psp_write_ptr_reg); 1807 return 0; 1808 } 1809 1810 static bool psp_check_fw_loading_status(struct amdgpu_device *adev, 1811 enum AMDGPU_UCODE_ID ucode_type) 1812 { 1813 struct amdgpu_firmware_info *ucode = NULL; 1814 1815 if (!adev->firmware.fw_size) 1816 return false; 1817 1818 ucode = &adev->firmware.ucode[ucode_type]; 1819 if (!ucode->fw || !ucode->ucode_size) 1820 return false; 1821 1822 return psp_compare_sram_data(&adev->psp, ucode, ucode_type); 1823 } 1824 1825 static int psp_set_clockgating_state(void *handle, 1826 enum amd_clockgating_state state) 1827 { 1828 return 0; 1829 } 1830 1831 static int psp_set_powergating_state(void *handle, 1832 enum amd_powergating_state state) 1833 { 1834 return 0; 1835 } 1836 1837 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 1838 struct device_attribute *attr, 1839 char *buf) 1840 { 1841 struct drm_device *ddev = dev_get_drvdata(dev); 1842 struct amdgpu_device *adev = ddev->dev_private; 1843 uint32_t fw_ver; 1844 int ret; 1845 1846 mutex_lock(&adev->psp.mutex); 1847 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 1848 mutex_unlock(&adev->psp.mutex); 1849 1850 if (ret) { 1851 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 1852 return ret; 1853 } 1854 1855 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 1856 } 1857 1858 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 1859 struct device_attribute *attr, 1860 const char *buf, 1861 size_t count) 1862 { 1863 struct drm_device *ddev = dev_get_drvdata(dev); 1864 struct amdgpu_device *adev = ddev->dev_private; 1865 void *cpu_addr; 1866 dma_addr_t dma_addr; 1867 int ret; 1868 char fw_name[100]; 1869 const struct firmware *usbc_pd_fw; 1870 1871 1872 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 1873 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 1874 if (ret) 1875 goto fail; 1876 1877 /* We need contiguous physical mem to place the FW for psp to access */ 1878 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 1879 1880 ret = dma_mapping_error(adev->dev, dma_addr); 1881 if (ret) 1882 goto rel_buf; 1883 1884 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 1885 1886 /* 1887 * x86 specific workaround. 1888 * Without it the buffer is invisible in PSP. 
1889 * 1890 * TODO Remove once PSP starts snooping CPU cache 1891 */ 1892 #ifdef CONFIG_X86 1893 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 1894 #endif 1895 1896 mutex_lock(&adev->psp.mutex); 1897 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 1898 mutex_unlock(&adev->psp.mutex); 1899 1900 rel_buf: 1901 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 1902 release_firmware(usbc_pd_fw); 1903 1904 fail: 1905 if (ret) { 1906 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 1907 return ret; 1908 } 1909 1910 return count; 1911 } 1912 1913 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 1914 psp_usbc_pd_fw_sysfs_read, 1915 psp_usbc_pd_fw_sysfs_write); 1916 1917 1918 1919 const struct amd_ip_funcs psp_ip_funcs = { 1920 .name = "psp", 1921 .early_init = psp_early_init, 1922 .late_init = psp_late_init, 1923 .sw_init = psp_sw_init, 1924 .sw_fini = psp_sw_fini, 1925 .hw_init = psp_hw_init, 1926 .hw_fini = psp_hw_fini, 1927 .suspend = psp_suspend, 1928 .resume = psp_resume, 1929 .is_idle = NULL, 1930 .check_soft_reset = NULL, 1931 .wait_for_idle = NULL, 1932 .soft_reset = NULL, 1933 .set_clockgating_state = psp_set_clockgating_state, 1934 .set_powergating_state = psp_set_powergating_state, 1935 }; 1936 1937 static int psp_sysfs_init(struct amdgpu_device *adev) 1938 { 1939 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 1940 1941 if (ret) 1942 DRM_ERROR("Failed to create USBC PD FW control file!"); 1943 1944 return ret; 1945 } 1946 1947 static void psp_sysfs_fini(struct amdgpu_device *adev) 1948 { 1949 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 1950 } 1951 1952 static const struct amdgpu_psp_funcs psp_funcs = { 1953 .check_fw_loading_status = psp_check_fw_loading_status, 1954 }; 1955 1956 static void psp_set_funcs(struct amdgpu_device *adev) 1957 { 1958 if (NULL == adev->firmware.funcs) 1959 adev->firmware.funcs = &psp_funcs; 1960 } 1961 1962 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 1963 { 1964 .type = AMD_IP_BLOCK_TYPE_PSP, 1965 .major = 3, 1966 .minor = 1, 1967 .rev = 0, 1968 .funcs = &psp_ip_funcs, 1969 }; 1970 1971 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 1972 { 1973 .type = AMD_IP_BLOCK_TYPE_PSP, 1974 .major = 10, 1975 .minor = 0, 1976 .rev = 0, 1977 .funcs = &psp_ip_funcs, 1978 }; 1979 1980 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 1981 { 1982 .type = AMD_IP_BLOCK_TYPE_PSP, 1983 .major = 11, 1984 .minor = 0, 1985 .rev = 0, 1986 .funcs = &psp_ip_funcs, 1987 }; 1988 1989 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 1990 { 1991 .type = AMD_IP_BLOCK_TYPE_PSP, 1992 .major = 12, 1993 .minor = 0, 1994 .rev = 0, 1995 .funcs = &psp_ip_funcs, 1996 }; 1997
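
/*
 * Usage note (illustrative, not part of this file): an ASIC's SoC code picks
 * one of the ip block versions exported above when it builds its IP block
 * list, roughly like:
 *
 *	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
 *		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 *
 * which is what eventually drives psp_early_init()/psp_hw_init() in this
 * file through the amd_ip_funcs table above.
 */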