1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <linux/dma-mapping.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "soc15_common.h" 33 #include "psp_v3_1.h" 34 #include "psp_v10_0.h" 35 #include "psp_v11_0.h" 36 #include "psp_v12_0.h" 37 38 #include "amdgpu_ras.h" 39 40 static int psp_sysfs_init(struct amdgpu_device *adev); 41 static void psp_sysfs_fini(struct amdgpu_device *adev); 42 43 static int psp_load_smu_fw(struct psp_context *psp); 44 45 /* 46 * Due to DF Cstate management centralized to PMFW, the firmware 47 * loading sequence will be updated as below: 48 * - Load KDB 49 * - Load SYS_DRV 50 * - Load tOS 51 * - Load PMFW 52 * - Setup TMR 53 * - Load other non-psp fw 54 * - Load ASD 55 * - Load XGMI/RAS/HDCP/DTM TA if any 56 * 57 * This new sequence is required for 58 * - Arcturus 59 * - Navi12 and onwards 60 */ 61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 62 { 63 struct amdgpu_device *adev = psp->adev; 64 65 psp->pmfw_centralized_cstate_management = false; 66 67 if (amdgpu_sriov_vf(adev)) 68 return; 69 70 if (adev->flags & AMD_IS_APU) 71 return; 72 73 if ((adev->asic_type == CHIP_ARCTURUS) || 74 (adev->asic_type >= CHIP_NAVI12)) 75 psp->pmfw_centralized_cstate_management = true; 76 } 77 78 static int psp_early_init(void *handle) 79 { 80 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct psp_context *psp = &adev->psp; 82 83 switch (adev->asic_type) { 84 case CHIP_VEGA10: 85 case CHIP_VEGA12: 86 psp_v3_1_set_psp_funcs(psp); 87 psp->autoload_supported = false; 88 break; 89 case CHIP_RAVEN: 90 psp_v10_0_set_psp_funcs(psp); 91 psp->autoload_supported = false; 92 break; 93 case CHIP_VEGA20: 94 case CHIP_ARCTURUS: 95 psp_v11_0_set_psp_funcs(psp); 96 psp->autoload_supported = false; 97 break; 98 case CHIP_NAVI10: 99 case CHIP_NAVI14: 100 case CHIP_NAVI12: 101 case CHIP_SIENNA_CICHLID: 102 case CHIP_NAVY_FLOUNDER: 103 psp_v11_0_set_psp_funcs(psp); 104 psp->autoload_supported = true; 105 break; 106 case CHIP_RENOIR: 107 psp_v12_0_set_psp_funcs(psp); 108 break; 109 default: 110 return -EINVAL; 111 } 112 113 psp->adev = adev; 114 115 psp_check_pmfw_centralized_cstate_management(psp); 116 117 return 0; 118 } 119 120 static void psp_memory_training_fini(struct psp_context *psp) 121 { 122 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 123 124 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 125 kfree(ctx->sys_cache); 126 ctx->sys_cache = NULL; 127 } 128 129 static int psp_memory_training_init(struct psp_context *psp) 130 { 131 int ret; 132 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 133 134 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 135 DRM_DEBUG("memory training is not supported!\n"); 136 return 0; 137 } 138 139 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 140 if (ctx->sys_cache == NULL) { 141 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); 142 ret = -ENOMEM; 143 goto Err_out; 144 } 145 146 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 147 ctx->train_data_size, 148 ctx->p2c_train_data_offset, 149 ctx->c2p_train_data_offset); 150 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 151 return 0; 152 153 Err_out: 154 psp_memory_training_fini(psp); 155 return ret; 156 } 157 158 static int psp_sw_init(void *handle) 159 { 160 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 161 struct psp_context *psp = &adev->psp; 162 int ret; 163 164 if (!amdgpu_sriov_vf(adev)) { 165 ret = psp_init_microcode(psp); 166 if (ret) { 167 DRM_ERROR("Failed to load psp firmware!\n"); 168 return ret; 169 } 170 } 171 172 ret = psp_memory_training_init(psp); 173 if (ret) { 174 DRM_ERROR("Failed to initialize memory training!\n"); 175 return ret; 176 } 177 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 178 if (ret) { 179 DRM_ERROR("Failed to process memory training!\n"); 180 return ret; 181 } 182 183 if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) { 184 ret= psp_sysfs_init(adev); 185 if (ret) { 186 return ret; 187 } 188 } 189 190 return 0; 191 } 192 193 static int psp_sw_fini(void *handle) 194 { 195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 196 197 psp_memory_training_fini(&adev->psp); 198 if (adev->psp.sos_fw) { 199 release_firmware(adev->psp.sos_fw); 200 adev->psp.sos_fw = NULL; 201 } 202 if (adev->psp.asd_fw) { 203 release_firmware(adev->psp.asd_fw); 204 adev->psp.asd_fw = NULL; 205 } 206 if (adev->psp.ta_fw) { 207 release_firmware(adev->psp.ta_fw); 208 adev->psp.ta_fw = NULL; 209 } 210 211 if (adev->asic_type == CHIP_NAVI10) 212 psp_sysfs_fini(adev); 213 214 return 0; 215 } 216 217 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 218 uint32_t reg_val, uint32_t mask, bool check_changed) 219 { 220 uint32_t val; 221 int i; 222 struct amdgpu_device *adev = psp->adev; 223 224 if (psp->adev->in_pci_err_recovery) 225 return 0; 226 227 for (i = 0; i < adev->usec_timeout; i++) { 228 val = RREG32(reg_index); 229 if (check_changed) { 230 if (val != reg_val) 231 return 0; 232 } else { 233 if ((val & mask) == reg_val) 234 return 0; 235 } 236 udelay(1); 237 } 238 239 return -ETIME; 240 } 241 242 static int 243 psp_cmd_submit_buf(struct psp_context *psp, 244 struct amdgpu_firmware_info *ucode, 245 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 246 { 247 int ret; 248 int index; 249 int timeout = 2000; 250 bool ras_intr = false; 251 bool skip_unsupport = false; 252 253 if (psp->adev->in_pci_err_recovery) 254 return 0; 255 256 mutex_lock(&psp->mutex); 257 258 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 259 260 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 261 262 index = atomic_inc_return(&psp->fence_value); 263 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 264 if (ret) { 265 atomic_dec(&psp->fence_value); 266 mutex_unlock(&psp->mutex); 267 return ret; 268 } 269 270 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 271 while (*((unsigned int *)psp->fence_buf) != index) { 272 if (--timeout == 0) 273 break; 274 /* 275 * Shouldn't wait for timeout when err_event_athub occurs, 276 * because gpu reset thread triggered and lock resource should 277 * be released for psp resume sequence. 278 */ 279 ras_intr = amdgpu_ras_intr_triggered(); 280 if (ras_intr) 281 break; 282 msleep(1); 283 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 284 } 285 286 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 287 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 288 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 289 290 /* In some cases, psp response status is not 0 even there is no 291 * problem while the command is submitted. Some version of PSP FW 292 * doesn't write 0 to that field. 293 * So here we would like to only print a warning instead of an error 294 * during psp initialization to avoid breaking hw_init and it doesn't 295 * return -EINVAL. 296 */ 297 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 298 if (ucode) 299 DRM_WARN("failed to load ucode id (%d) ", 300 ucode->ucode_id); 301 DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", 302 psp->cmd_buf_mem->cmd_id, 303 psp->cmd_buf_mem->resp.status); 304 if (!timeout) { 305 mutex_unlock(&psp->mutex); 306 return -EINVAL; 307 } 308 } 309 310 /* get xGMI session id from response buffer */ 311 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; 312 313 if (ucode) { 314 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 315 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 316 } 317 mutex_unlock(&psp->mutex); 318 319 return ret; 320 } 321 322 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 323 struct psp_gfx_cmd_resp *cmd, 324 uint64_t tmr_mc, uint32_t size) 325 { 326 if (amdgpu_sriov_vf(psp->adev)) 327 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 328 else 329 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 330 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 331 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 332 cmd->cmd.cmd_setup_tmr.buf_size = size; 333 } 334 335 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 336 uint64_t pri_buf_mc, uint32_t size) 337 { 338 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 339 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 340 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 341 cmd->cmd.cmd_load_toc.toc_size = size; 342 } 343 344 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 345 static int psp_load_toc(struct psp_context *psp, 346 uint32_t *tmr_size) 347 { 348 int ret; 349 struct psp_gfx_cmd_resp *cmd; 350 351 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 352 if (!cmd) 353 return -ENOMEM; 354 /* Copy toc to psp firmware private buffer */ 355 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 356 memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); 357 358 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); 359 360 ret = psp_cmd_submit_buf(psp, NULL, cmd, 361 psp->fence_buf_mc_addr); 362 if (!ret) 363 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 364 kfree(cmd); 365 return ret; 366 } 367 368 /* Set up Trusted Memory Region */ 369 static int psp_tmr_init(struct psp_context *psp) 370 { 371 int ret; 372 int tmr_size; 373 void *tmr_buf; 374 void **pptr; 375 376 /* 377 * According to HW engineer, they prefer the TMR address be "naturally 378 * aligned" , e.g. the start address be an integer divide of TMR size. 379 * 380 * Note: this memory need be reserved till the driver 381 * uninitializes. 382 */ 383 tmr_size = PSP_TMR_SIZE; 384 385 /* For ASICs support RLC autoload, psp will parse the toc 386 * and calculate the total size of TMR needed */ 387 if (!amdgpu_sriov_vf(psp->adev) && 388 psp->toc_start_addr && 389 psp->toc_bin_size && 390 psp->fw_pri_buf) { 391 ret = psp_load_toc(psp, &tmr_size); 392 if (ret) { 393 DRM_ERROR("Failed to load toc\n"); 394 return ret; 395 } 396 } 397 398 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 399 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, 400 AMDGPU_GEM_DOMAIN_VRAM, 401 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 402 403 return ret; 404 } 405 406 static int psp_clear_vf_fw(struct psp_context *psp) 407 { 408 int ret; 409 struct psp_gfx_cmd_resp *cmd; 410 411 if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12) 412 return 0; 413 414 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 415 if (!cmd) 416 return -ENOMEM; 417 418 cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW; 419 420 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 421 kfree(cmd); 422 423 return ret; 424 } 425 426 static bool psp_skip_tmr(struct psp_context *psp) 427 { 428 switch (psp->adev->asic_type) { 429 case CHIP_NAVI12: 430 case CHIP_SIENNA_CICHLID: 431 return true; 432 default: 433 return false; 434 } 435 } 436 437 static int psp_tmr_load(struct psp_context *psp) 438 { 439 int ret; 440 struct psp_gfx_cmd_resp *cmd; 441 442 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 443 * Already set up by host driver. 444 */ 445 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 446 return 0; 447 448 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 449 if (!cmd) 450 return -ENOMEM; 451 452 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 453 amdgpu_bo_size(psp->tmr_bo)); 454 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 455 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 456 457 ret = psp_cmd_submit_buf(psp, NULL, cmd, 458 psp->fence_buf_mc_addr); 459 460 kfree(cmd); 461 462 return ret; 463 } 464 465 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 466 struct psp_gfx_cmd_resp *cmd) 467 { 468 if (amdgpu_sriov_vf(psp->adev)) 469 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 470 else 471 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 472 } 473 474 static int psp_tmr_unload(struct psp_context *psp) 475 { 476 int ret; 477 struct psp_gfx_cmd_resp *cmd; 478 479 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 480 if (!cmd) 481 return -ENOMEM; 482 483 psp_prep_tmr_unload_cmd_buf(psp, cmd); 484 DRM_INFO("free PSP TMR buffer\n"); 485 486 ret = psp_cmd_submit_buf(psp, NULL, cmd, 487 psp->fence_buf_mc_addr); 488 489 kfree(cmd); 490 491 return ret; 492 } 493 494 static int psp_tmr_terminate(struct psp_context *psp) 495 { 496 int ret; 497 void *tmr_buf; 498 void **pptr; 499 500 ret = psp_tmr_unload(psp); 501 if (ret) 502 return ret; 503 504 /* free TMR memory buffer */ 505 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 506 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 507 508 return 0; 509 } 510 511 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 512 uint64_t asd_mc, uint32_t size) 513 { 514 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; 515 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); 516 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); 517 cmd->cmd.cmd_load_ta.app_len = size; 518 519 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; 520 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; 521 cmd->cmd.cmd_load_ta.cmd_buf_len = 0; 522 } 523 524 static int psp_asd_load(struct psp_context *psp) 525 { 526 int ret; 527 struct psp_gfx_cmd_resp *cmd; 528 529 /* If PSP version doesn't match ASD version, asd loading will be failed. 530 * add workaround to bypass it for sriov now. 531 * TODO: add version check to make it common 532 */ 533 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw) 534 return 0; 535 536 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 537 if (!cmd) 538 return -ENOMEM; 539 540 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 541 memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); 542 543 psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 544 psp->asd_ucode_size); 545 546 ret = psp_cmd_submit_buf(psp, NULL, cmd, 547 psp->fence_buf_mc_addr); 548 if (!ret) { 549 psp->asd_context.asd_initialized = true; 550 psp->asd_context.session_id = cmd->resp.session_id; 551 } 552 553 kfree(cmd); 554 555 return ret; 556 } 557 558 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 559 uint32_t session_id) 560 { 561 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 562 cmd->cmd.cmd_unload_ta.session_id = session_id; 563 } 564 565 static int psp_asd_unload(struct psp_context *psp) 566 { 567 int ret; 568 struct psp_gfx_cmd_resp *cmd; 569 570 if (amdgpu_sriov_vf(psp->adev)) 571 return 0; 572 573 if (!psp->asd_context.asd_initialized) 574 return 0; 575 576 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 577 if (!cmd) 578 return -ENOMEM; 579 580 psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); 581 582 ret = psp_cmd_submit_buf(psp, NULL, cmd, 583 psp->fence_buf_mc_addr); 584 if (!ret) 585 psp->asd_context.asd_initialized = false; 586 587 kfree(cmd); 588 589 return ret; 590 } 591 592 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 593 uint32_t id, uint32_t value) 594 { 595 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 596 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 597 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 598 } 599 600 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 601 uint32_t value) 602 { 603 struct psp_gfx_cmd_resp *cmd = NULL; 604 int ret = 0; 605 606 if (reg >= PSP_REG_LAST) 607 return -EINVAL; 608 609 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 610 if (!cmd) 611 return -ENOMEM; 612 613 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 614 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 615 616 kfree(cmd); 617 return ret; 618 } 619 620 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 621 uint64_t ta_bin_mc, 622 uint32_t ta_bin_size, 623 uint64_t ta_shared_mc, 624 uint32_t ta_shared_size) 625 { 626 cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 627 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 628 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 629 cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 630 631 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 632 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 633 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 634 } 635 636 static int psp_xgmi_init_shared_buf(struct psp_context *psp) 637 { 638 int ret; 639 640 /* 641 * Allocate 16k memory aligned to 4k from Frame Buffer (local 642 * physical) for xgmi ta <-> Driver 643 */ 644 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, 645 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 646 &psp->xgmi_context.xgmi_shared_bo, 647 &psp->xgmi_context.xgmi_shared_mc_addr, 648 &psp->xgmi_context.xgmi_shared_buf); 649 650 return ret; 651 } 652 653 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 654 uint32_t ta_cmd_id, 655 uint32_t session_id) 656 { 657 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 658 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 659 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 660 } 661 662 static int psp_ta_invoke(struct psp_context *psp, 663 uint32_t ta_cmd_id, 664 uint32_t session_id) 665 { 666 int ret; 667 struct psp_gfx_cmd_resp *cmd; 668 669 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 670 if (!cmd) 671 return -ENOMEM; 672 673 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 674 675 ret = psp_cmd_submit_buf(psp, NULL, cmd, 676 psp->fence_buf_mc_addr); 677 678 kfree(cmd); 679 680 return ret; 681 } 682 683 static int psp_xgmi_load(struct psp_context *psp) 684 { 685 int ret; 686 struct psp_gfx_cmd_resp *cmd; 687 688 /* 689 * TODO: bypass the loading in sriov for now 690 */ 691 692 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 693 if (!cmd) 694 return -ENOMEM; 695 696 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 697 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 698 699 psp_prep_ta_load_cmd_buf(cmd, 700 psp->fw_pri_mc_addr, 701 psp->ta_xgmi_ucode_size, 702 psp->xgmi_context.xgmi_shared_mc_addr, 703 PSP_XGMI_SHARED_MEM_SIZE); 704 705 ret = psp_cmd_submit_buf(psp, NULL, cmd, 706 psp->fence_buf_mc_addr); 707 708 if (!ret) { 709 psp->xgmi_context.initialized = 1; 710 psp->xgmi_context.session_id = cmd->resp.session_id; 711 } 712 713 kfree(cmd); 714 715 return ret; 716 } 717 718 static int psp_xgmi_unload(struct psp_context *psp) 719 { 720 int ret; 721 struct psp_gfx_cmd_resp *cmd; 722 struct amdgpu_device *adev = psp->adev; 723 724 /* XGMI TA unload currently is not supported on Arcturus */ 725 if (adev->asic_type == CHIP_ARCTURUS) 726 return 0; 727 728 /* 729 * TODO: bypass the unloading in sriov for now 730 */ 731 732 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 733 if (!cmd) 734 return -ENOMEM; 735 736 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 737 738 ret = psp_cmd_submit_buf(psp, NULL, cmd, 739 psp->fence_buf_mc_addr); 740 741 kfree(cmd); 742 743 return ret; 744 } 745 746 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 747 { 748 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 749 } 750 751 int psp_xgmi_terminate(struct psp_context *psp) 752 { 753 int ret; 754 755 if (!psp->xgmi_context.initialized) 756 return 0; 757 758 ret = psp_xgmi_unload(psp); 759 if (ret) 760 return ret; 761 762 psp->xgmi_context.initialized = 0; 763 764 /* free xgmi shared memory */ 765 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, 766 &psp->xgmi_context.xgmi_shared_mc_addr, 767 &psp->xgmi_context.xgmi_shared_buf); 768 769 return 0; 770 } 771 772 int psp_xgmi_initialize(struct psp_context *psp) 773 { 774 struct ta_xgmi_shared_memory *xgmi_cmd; 775 int ret; 776 777 if (!psp->adev->psp.ta_fw || 778 !psp->adev->psp.ta_xgmi_ucode_size || 779 !psp->adev->psp.ta_xgmi_start_addr) 780 return -ENOENT; 781 782 if (!psp->xgmi_context.initialized) { 783 ret = psp_xgmi_init_shared_buf(psp); 784 if (ret) 785 return ret; 786 } 787 788 /* Load XGMI TA */ 789 ret = psp_xgmi_load(psp); 790 if (ret) 791 return ret; 792 793 /* Initialize XGMI session */ 794 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); 795 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 796 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 797 798 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 799 800 return ret; 801 } 802 803 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 804 { 805 struct ta_xgmi_shared_memory *xgmi_cmd; 806 int ret; 807 808 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 809 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 810 811 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 812 813 /* Invoke xgmi ta to get hive id */ 814 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 815 if (ret) 816 return ret; 817 818 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 819 820 return 0; 821 } 822 823 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 824 { 825 struct ta_xgmi_shared_memory *xgmi_cmd; 826 int ret; 827 828 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 829 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 830 831 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 832 833 /* Invoke xgmi ta to get the node id */ 834 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 835 if (ret) 836 return ret; 837 838 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 839 840 return 0; 841 } 842 843 int psp_xgmi_get_topology_info(struct psp_context *psp, 844 int number_devices, 845 struct psp_xgmi_topology_info *topology) 846 { 847 struct ta_xgmi_shared_memory *xgmi_cmd; 848 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 849 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 850 int i; 851 int ret; 852 853 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 854 return -EINVAL; 855 856 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 857 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 858 859 /* Fill in the shared memory with topology information as input */ 860 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 861 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; 862 topology_info_input->num_nodes = number_devices; 863 864 for (i = 0; i < topology_info_input->num_nodes; i++) { 865 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 866 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 867 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 868 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 869 } 870 871 /* Invoke xgmi ta to get the topology information */ 872 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); 873 if (ret) 874 return ret; 875 876 /* Read the output topology information from the shared memory */ 877 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 878 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 879 for (i = 0; i < topology->num_nodes; i++) { 880 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 881 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 882 topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; 883 topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; 884 } 885 886 return 0; 887 } 888 889 int psp_xgmi_set_topology_info(struct psp_context *psp, 890 int number_devices, 891 struct psp_xgmi_topology_info *topology) 892 { 893 struct ta_xgmi_shared_memory *xgmi_cmd; 894 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 895 int i; 896 897 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 898 return -EINVAL; 899 900 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 901 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 902 903 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 904 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 905 topology_info_input->num_nodes = number_devices; 906 907 for (i = 0; i < topology_info_input->num_nodes; i++) { 908 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 909 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 910 topology_info_input->nodes[i].is_sharing_enabled = 1; 911 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 912 } 913 914 /* Invoke xgmi ta to set topology information */ 915 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 916 } 917 918 // ras begin 919 static int psp_ras_init_shared_buf(struct psp_context *psp) 920 { 921 int ret; 922 923 /* 924 * Allocate 16k memory aligned to 4k from Frame Buffer (local 925 * physical) for ras ta <-> Driver 926 */ 927 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, 928 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 929 &psp->ras.ras_shared_bo, 930 &psp->ras.ras_shared_mc_addr, 931 &psp->ras.ras_shared_buf); 932 933 return ret; 934 } 935 936 static int psp_ras_load(struct psp_context *psp) 937 { 938 int ret; 939 struct psp_gfx_cmd_resp *cmd; 940 struct ta_ras_shared_memory *ras_cmd; 941 942 /* 943 * TODO: bypass the loading in sriov for now 944 */ 945 if (amdgpu_sriov_vf(psp->adev)) 946 return 0; 947 948 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 949 if (!cmd) 950 return -ENOMEM; 951 952 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 953 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 954 955 psp_prep_ta_load_cmd_buf(cmd, 956 psp->fw_pri_mc_addr, 957 psp->ta_ras_ucode_size, 958 psp->ras.ras_shared_mc_addr, 959 PSP_RAS_SHARED_MEM_SIZE); 960 961 ret = psp_cmd_submit_buf(psp, NULL, cmd, 962 psp->fence_buf_mc_addr); 963 964 ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf; 965 966 if (!ret) { 967 psp->ras.session_id = cmd->resp.session_id; 968 969 if (!ras_cmd->ras_status) 970 psp->ras.ras_initialized = true; 971 else 972 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 973 } 974 975 if (ret || ras_cmd->ras_status) 976 amdgpu_ras_fini(psp->adev); 977 978 kfree(cmd); 979 980 return ret; 981 } 982 983 static int psp_ras_unload(struct psp_context *psp) 984 { 985 int ret; 986 struct psp_gfx_cmd_resp *cmd; 987 988 /* 989 * TODO: bypass the unloading in sriov for now 990 */ 991 if (amdgpu_sriov_vf(psp->adev)) 992 return 0; 993 994 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 995 if (!cmd) 996 return -ENOMEM; 997 998 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 999 1000 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1001 psp->fence_buf_mc_addr); 1002 1003 kfree(cmd); 1004 1005 return ret; 1006 } 1007 1008 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1009 { 1010 struct ta_ras_shared_memory *ras_cmd; 1011 int ret; 1012 1013 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1014 1015 /* 1016 * TODO: bypass the loading in sriov for now 1017 */ 1018 if (amdgpu_sriov_vf(psp->adev)) 1019 return 0; 1020 1021 ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 1022 1023 if (amdgpu_ras_intr_triggered()) 1024 return ret; 1025 1026 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) 1027 { 1028 DRM_WARN("RAS: Unsupported Interface"); 1029 return -EINVAL; 1030 } 1031 1032 if (!ret) { 1033 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1034 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1035 1036 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1037 } 1038 else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1039 dev_warn(psp->adev->dev, 1040 "RAS internal register access blocked\n"); 1041 } 1042 1043 return ret; 1044 } 1045 1046 int psp_ras_enable_features(struct psp_context *psp, 1047 union ta_ras_cmd_input *info, bool enable) 1048 { 1049 struct ta_ras_shared_memory *ras_cmd; 1050 int ret; 1051 1052 if (!psp->ras.ras_initialized) 1053 return -EINVAL; 1054 1055 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1056 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1057 1058 if (enable) 1059 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 1060 else 1061 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 1062 1063 ras_cmd->ras_in_message = *info; 1064 1065 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1066 if (ret) 1067 return -EINVAL; 1068 1069 return ras_cmd->ras_status; 1070 } 1071 1072 static int psp_ras_terminate(struct psp_context *psp) 1073 { 1074 int ret; 1075 1076 /* 1077 * TODO: bypass the terminate in sriov for now 1078 */ 1079 if (amdgpu_sriov_vf(psp->adev)) 1080 return 0; 1081 1082 if (!psp->ras.ras_initialized) 1083 return 0; 1084 1085 ret = psp_ras_unload(psp); 1086 if (ret) 1087 return ret; 1088 1089 psp->ras.ras_initialized = false; 1090 1091 /* free ras shared memory */ 1092 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, 1093 &psp->ras.ras_shared_mc_addr, 1094 &psp->ras.ras_shared_buf); 1095 1096 return 0; 1097 } 1098 1099 static int psp_ras_initialize(struct psp_context *psp) 1100 { 1101 int ret; 1102 1103 /* 1104 * TODO: bypass the initialize in sriov for now 1105 */ 1106 if (amdgpu_sriov_vf(psp->adev)) 1107 return 0; 1108 1109 if (!psp->adev->psp.ta_ras_ucode_size || 1110 !psp->adev->psp.ta_ras_start_addr) { 1111 dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); 1112 return 0; 1113 } 1114 1115 if (!psp->ras.ras_initialized) { 1116 ret = psp_ras_init_shared_buf(psp); 1117 if (ret) 1118 return ret; 1119 } 1120 1121 ret = psp_ras_load(psp); 1122 if (ret) 1123 return ret; 1124 1125 return 0; 1126 } 1127 1128 int psp_ras_trigger_error(struct psp_context *psp, 1129 struct ta_ras_trigger_error_input *info) 1130 { 1131 struct ta_ras_shared_memory *ras_cmd; 1132 int ret; 1133 1134 if (!psp->ras.ras_initialized) 1135 return -EINVAL; 1136 1137 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1138 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1139 1140 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; 1141 ras_cmd->ras_in_message.trigger_error = *info; 1142 1143 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1144 if (ret) 1145 return -EINVAL; 1146 1147 /* If err_event_athub occurs error inject was successful, however 1148 return status from TA is no long reliable */ 1149 if (amdgpu_ras_intr_triggered()) 1150 return 0; 1151 1152 return ras_cmd->ras_status; 1153 } 1154 // ras end 1155 1156 // HDCP start 1157 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 1158 { 1159 int ret; 1160 1161 /* 1162 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1163 * physical) for hdcp ta <-> Driver 1164 */ 1165 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, 1166 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1167 &psp->hdcp_context.hdcp_shared_bo, 1168 &psp->hdcp_context.hdcp_shared_mc_addr, 1169 &psp->hdcp_context.hdcp_shared_buf); 1170 1171 return ret; 1172 } 1173 1174 static int psp_hdcp_load(struct psp_context *psp) 1175 { 1176 int ret; 1177 struct psp_gfx_cmd_resp *cmd; 1178 1179 /* 1180 * TODO: bypass the loading in sriov for now 1181 */ 1182 if (amdgpu_sriov_vf(psp->adev)) 1183 return 0; 1184 1185 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1186 if (!cmd) 1187 return -ENOMEM; 1188 1189 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1190 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 1191 psp->ta_hdcp_ucode_size); 1192 1193 psp_prep_ta_load_cmd_buf(cmd, 1194 psp->fw_pri_mc_addr, 1195 psp->ta_hdcp_ucode_size, 1196 psp->hdcp_context.hdcp_shared_mc_addr, 1197 PSP_HDCP_SHARED_MEM_SIZE); 1198 1199 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1200 1201 if (!ret) { 1202 psp->hdcp_context.hdcp_initialized = true; 1203 psp->hdcp_context.session_id = cmd->resp.session_id; 1204 mutex_init(&psp->hdcp_context.mutex); 1205 } 1206 1207 kfree(cmd); 1208 1209 return ret; 1210 } 1211 static int psp_hdcp_initialize(struct psp_context *psp) 1212 { 1213 int ret; 1214 1215 /* 1216 * TODO: bypass the initialize in sriov for now 1217 */ 1218 if (amdgpu_sriov_vf(psp->adev)) 1219 return 0; 1220 1221 if (!psp->adev->psp.ta_hdcp_ucode_size || 1222 !psp->adev->psp.ta_hdcp_start_addr) { 1223 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1224 return 0; 1225 } 1226 1227 if (!psp->hdcp_context.hdcp_initialized) { 1228 ret = psp_hdcp_init_shared_buf(psp); 1229 if (ret) 1230 return ret; 1231 } 1232 1233 ret = psp_hdcp_load(psp); 1234 if (ret) 1235 return ret; 1236 1237 return 0; 1238 } 1239 1240 static int psp_hdcp_unload(struct psp_context *psp) 1241 { 1242 int ret; 1243 struct psp_gfx_cmd_resp *cmd; 1244 1245 /* 1246 * TODO: bypass the unloading in sriov for now 1247 */ 1248 if (amdgpu_sriov_vf(psp->adev)) 1249 return 0; 1250 1251 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1252 if (!cmd) 1253 return -ENOMEM; 1254 1255 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 1256 1257 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1258 1259 kfree(cmd); 1260 1261 return ret; 1262 } 1263 1264 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1265 { 1266 /* 1267 * TODO: bypass the loading in sriov for now 1268 */ 1269 if (amdgpu_sriov_vf(psp->adev)) 1270 return 0; 1271 1272 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 1273 } 1274 1275 static int psp_hdcp_terminate(struct psp_context *psp) 1276 { 1277 int ret; 1278 1279 /* 1280 * TODO: bypass the terminate in sriov for now 1281 */ 1282 if (amdgpu_sriov_vf(psp->adev)) 1283 return 0; 1284 1285 if (!psp->hdcp_context.hdcp_initialized) 1286 return 0; 1287 1288 ret = psp_hdcp_unload(psp); 1289 if (ret) 1290 return ret; 1291 1292 psp->hdcp_context.hdcp_initialized = false; 1293 1294 /* free hdcp shared memory */ 1295 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, 1296 &psp->hdcp_context.hdcp_shared_mc_addr, 1297 &psp->hdcp_context.hdcp_shared_buf); 1298 1299 return 0; 1300 } 1301 // HDCP end 1302 1303 // DTM start 1304 static int psp_dtm_init_shared_buf(struct psp_context *psp) 1305 { 1306 int ret; 1307 1308 /* 1309 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1310 * physical) for dtm ta <-> Driver 1311 */ 1312 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, 1313 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1314 &psp->dtm_context.dtm_shared_bo, 1315 &psp->dtm_context.dtm_shared_mc_addr, 1316 &psp->dtm_context.dtm_shared_buf); 1317 1318 return ret; 1319 } 1320 1321 static int psp_dtm_load(struct psp_context *psp) 1322 { 1323 int ret; 1324 struct psp_gfx_cmd_resp *cmd; 1325 1326 /* 1327 * TODO: bypass the loading in sriov for now 1328 */ 1329 if (amdgpu_sriov_vf(psp->adev)) 1330 return 0; 1331 1332 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1333 if (!cmd) 1334 return -ENOMEM; 1335 1336 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1337 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 1338 1339 psp_prep_ta_load_cmd_buf(cmd, 1340 psp->fw_pri_mc_addr, 1341 psp->ta_dtm_ucode_size, 1342 psp->dtm_context.dtm_shared_mc_addr, 1343 PSP_DTM_SHARED_MEM_SIZE); 1344 1345 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1346 1347 if (!ret) { 1348 psp->dtm_context.dtm_initialized = true; 1349 psp->dtm_context.session_id = cmd->resp.session_id; 1350 mutex_init(&psp->dtm_context.mutex); 1351 } 1352 1353 kfree(cmd); 1354 1355 return ret; 1356 } 1357 1358 static int psp_dtm_initialize(struct psp_context *psp) 1359 { 1360 int ret; 1361 1362 /* 1363 * TODO: bypass the initialize in sriov for now 1364 */ 1365 if (amdgpu_sriov_vf(psp->adev)) 1366 return 0; 1367 1368 if (!psp->adev->psp.ta_dtm_ucode_size || 1369 !psp->adev->psp.ta_dtm_start_addr) { 1370 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1371 return 0; 1372 } 1373 1374 if (!psp->dtm_context.dtm_initialized) { 1375 ret = psp_dtm_init_shared_buf(psp); 1376 if (ret) 1377 return ret; 1378 } 1379 1380 ret = psp_dtm_load(psp); 1381 if (ret) 1382 return ret; 1383 1384 return 0; 1385 } 1386 1387 static int psp_dtm_unload(struct psp_context *psp) 1388 { 1389 int ret; 1390 struct psp_gfx_cmd_resp *cmd; 1391 1392 /* 1393 * TODO: bypass the unloading in sriov for now 1394 */ 1395 if (amdgpu_sriov_vf(psp->adev)) 1396 return 0; 1397 1398 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1399 if (!cmd) 1400 return -ENOMEM; 1401 1402 psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); 1403 1404 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1405 1406 kfree(cmd); 1407 1408 return ret; 1409 } 1410 1411 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1412 { 1413 /* 1414 * TODO: bypass the loading in sriov for now 1415 */ 1416 if (amdgpu_sriov_vf(psp->adev)) 1417 return 0; 1418 1419 return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); 1420 } 1421 1422 static int psp_dtm_terminate(struct psp_context *psp) 1423 { 1424 int ret; 1425 1426 /* 1427 * TODO: bypass the terminate in sriov for now 1428 */ 1429 if (amdgpu_sriov_vf(psp->adev)) 1430 return 0; 1431 1432 if (!psp->dtm_context.dtm_initialized) 1433 return 0; 1434 1435 ret = psp_dtm_unload(psp); 1436 if (ret) 1437 return ret; 1438 1439 psp->dtm_context.dtm_initialized = false; 1440 1441 /* free hdcp shared memory */ 1442 amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, 1443 &psp->dtm_context.dtm_shared_mc_addr, 1444 &psp->dtm_context.dtm_shared_buf); 1445 1446 return 0; 1447 } 1448 // DTM end 1449 1450 // RAP start 1451 static int psp_rap_init_shared_buf(struct psp_context *psp) 1452 { 1453 int ret; 1454 1455 /* 1456 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1457 * physical) for rap ta <-> Driver 1458 */ 1459 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE, 1460 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1461 &psp->rap_context.rap_shared_bo, 1462 &psp->rap_context.rap_shared_mc_addr, 1463 &psp->rap_context.rap_shared_buf); 1464 1465 return ret; 1466 } 1467 1468 static int psp_rap_load(struct psp_context *psp) 1469 { 1470 int ret; 1471 struct psp_gfx_cmd_resp *cmd; 1472 1473 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1474 if (!cmd) 1475 return -ENOMEM; 1476 1477 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1478 memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size); 1479 1480 psp_prep_ta_load_cmd_buf(cmd, 1481 psp->fw_pri_mc_addr, 1482 psp->ta_rap_ucode_size, 1483 psp->rap_context.rap_shared_mc_addr, 1484 PSP_RAP_SHARED_MEM_SIZE); 1485 1486 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1487 1488 if (!ret) { 1489 psp->rap_context.rap_initialized = true; 1490 psp->rap_context.session_id = cmd->resp.session_id; 1491 mutex_init(&psp->rap_context.mutex); 1492 } 1493 1494 kfree(cmd); 1495 1496 return ret; 1497 } 1498 1499 static int psp_rap_unload(struct psp_context *psp) 1500 { 1501 int ret; 1502 struct psp_gfx_cmd_resp *cmd; 1503 1504 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1505 if (!cmd) 1506 return -ENOMEM; 1507 1508 psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id); 1509 1510 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1511 1512 kfree(cmd); 1513 1514 return ret; 1515 } 1516 1517 static int psp_rap_initialize(struct psp_context *psp) 1518 { 1519 int ret; 1520 1521 /* 1522 * TODO: bypass the initialize in sriov for now 1523 */ 1524 if (amdgpu_sriov_vf(psp->adev)) 1525 return 0; 1526 1527 if (!psp->adev->psp.ta_rap_ucode_size || 1528 !psp->adev->psp.ta_rap_start_addr) { 1529 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1530 return 0; 1531 } 1532 1533 if (!psp->rap_context.rap_initialized) { 1534 ret = psp_rap_init_shared_buf(psp); 1535 if (ret) 1536 return ret; 1537 } 1538 1539 ret = psp_rap_load(psp); 1540 if (ret) 1541 return ret; 1542 1543 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE); 1544 if (ret != TA_RAP_STATUS__SUCCESS) { 1545 psp_rap_unload(psp); 1546 1547 amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, 1548 &psp->rap_context.rap_shared_mc_addr, 1549 &psp->rap_context.rap_shared_buf); 1550 1551 psp->rap_context.rap_initialized = false; 1552 1553 dev_warn(psp->adev->dev, "RAP TA initialize fail.\n"); 1554 return -EINVAL; 1555 } 1556 1557 return 0; 1558 } 1559 1560 static int psp_rap_terminate(struct psp_context *psp) 1561 { 1562 int ret; 1563 1564 if (!psp->rap_context.rap_initialized) 1565 return 0; 1566 1567 ret = psp_rap_unload(psp); 1568 1569 psp->rap_context.rap_initialized = false; 1570 1571 /* free rap shared memory */ 1572 amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, 1573 &psp->rap_context.rap_shared_mc_addr, 1574 &psp->rap_context.rap_shared_buf); 1575 1576 return ret; 1577 } 1578 1579 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1580 { 1581 struct ta_rap_shared_memory *rap_cmd; 1582 int ret; 1583 1584 if (!psp->rap_context.rap_initialized) 1585 return -EINVAL; 1586 1587 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 1588 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 1589 return -EINVAL; 1590 1591 mutex_lock(&psp->rap_context.mutex); 1592 1593 rap_cmd = (struct ta_rap_shared_memory *) 1594 psp->rap_context.rap_shared_buf; 1595 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 1596 1597 rap_cmd->cmd_id = ta_cmd_id; 1598 rap_cmd->validation_method_id = METHOD_A; 1599 1600 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id); 1601 if (ret) { 1602 mutex_unlock(&psp->rap_context.mutex); 1603 return ret; 1604 } 1605 1606 mutex_unlock(&psp->rap_context.mutex); 1607 1608 return rap_cmd->rap_status; 1609 } 1610 // RAP end 1611 1612 static int psp_hw_start(struct psp_context *psp) 1613 { 1614 struct amdgpu_device *adev = psp->adev; 1615 int ret; 1616 1617 if (!amdgpu_sriov_vf(adev)) { 1618 if (psp->kdb_bin_size && 1619 (psp->funcs->bootloader_load_kdb != NULL)) { 1620 ret = psp_bootloader_load_kdb(psp); 1621 if (ret) { 1622 DRM_ERROR("PSP load kdb failed!\n"); 1623 return ret; 1624 } 1625 } 1626 1627 if (psp->spl_bin_size) { 1628 ret = psp_bootloader_load_spl(psp); 1629 if (ret) { 1630 DRM_ERROR("PSP load spl failed!\n"); 1631 return ret; 1632 } 1633 } 1634 1635 ret = psp_bootloader_load_sysdrv(psp); 1636 if (ret) { 1637 DRM_ERROR("PSP load sysdrv failed!\n"); 1638 return ret; 1639 } 1640 1641 ret = psp_bootloader_load_sos(psp); 1642 if (ret) { 1643 DRM_ERROR("PSP load sos failed!\n"); 1644 return ret; 1645 } 1646 } 1647 1648 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 1649 if (ret) { 1650 DRM_ERROR("PSP create ring failed!\n"); 1651 return ret; 1652 } 1653 1654 ret = psp_clear_vf_fw(psp); 1655 if (ret) { 1656 DRM_ERROR("PSP clear vf fw!\n"); 1657 return ret; 1658 } 1659 1660 ret = psp_tmr_init(psp); 1661 if (ret) { 1662 DRM_ERROR("PSP tmr init failed!\n"); 1663 return ret; 1664 } 1665 1666 /* 1667 * For ASICs with DF Cstate management centralized 1668 * to PMFW, TMR setup should be performed after PMFW 1669 * loaded and before other non-psp firmware loaded. 1670 */ 1671 if (psp->pmfw_centralized_cstate_management) { 1672 ret = psp_load_smu_fw(psp); 1673 if (ret) 1674 return ret; 1675 } 1676 1677 ret = psp_tmr_load(psp); 1678 if (ret) { 1679 DRM_ERROR("PSP load tmr failed!\n"); 1680 return ret; 1681 } 1682 1683 return 0; 1684 } 1685 1686 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 1687 enum psp_gfx_fw_type *type) 1688 { 1689 switch (ucode->ucode_id) { 1690 case AMDGPU_UCODE_ID_SDMA0: 1691 *type = GFX_FW_TYPE_SDMA0; 1692 break; 1693 case AMDGPU_UCODE_ID_SDMA1: 1694 *type = GFX_FW_TYPE_SDMA1; 1695 break; 1696 case AMDGPU_UCODE_ID_SDMA2: 1697 *type = GFX_FW_TYPE_SDMA2; 1698 break; 1699 case AMDGPU_UCODE_ID_SDMA3: 1700 *type = GFX_FW_TYPE_SDMA3; 1701 break; 1702 case AMDGPU_UCODE_ID_SDMA4: 1703 *type = GFX_FW_TYPE_SDMA4; 1704 break; 1705 case AMDGPU_UCODE_ID_SDMA5: 1706 *type = GFX_FW_TYPE_SDMA5; 1707 break; 1708 case AMDGPU_UCODE_ID_SDMA6: 1709 *type = GFX_FW_TYPE_SDMA6; 1710 break; 1711 case AMDGPU_UCODE_ID_SDMA7: 1712 *type = GFX_FW_TYPE_SDMA7; 1713 break; 1714 case AMDGPU_UCODE_ID_CP_MES: 1715 *type = GFX_FW_TYPE_CP_MES; 1716 break; 1717 case AMDGPU_UCODE_ID_CP_MES_DATA: 1718 *type = GFX_FW_TYPE_MES_STACK; 1719 break; 1720 case AMDGPU_UCODE_ID_CP_CE: 1721 *type = GFX_FW_TYPE_CP_CE; 1722 break; 1723 case AMDGPU_UCODE_ID_CP_PFP: 1724 *type = GFX_FW_TYPE_CP_PFP; 1725 break; 1726 case AMDGPU_UCODE_ID_CP_ME: 1727 *type = GFX_FW_TYPE_CP_ME; 1728 break; 1729 case AMDGPU_UCODE_ID_CP_MEC1: 1730 *type = GFX_FW_TYPE_CP_MEC; 1731 break; 1732 case AMDGPU_UCODE_ID_CP_MEC1_JT: 1733 *type = GFX_FW_TYPE_CP_MEC_ME1; 1734 break; 1735 case AMDGPU_UCODE_ID_CP_MEC2: 1736 *type = GFX_FW_TYPE_CP_MEC; 1737 break; 1738 case AMDGPU_UCODE_ID_CP_MEC2_JT: 1739 *type = GFX_FW_TYPE_CP_MEC_ME2; 1740 break; 1741 case AMDGPU_UCODE_ID_RLC_G: 1742 *type = GFX_FW_TYPE_RLC_G; 1743 break; 1744 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 1745 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 1746 break; 1747 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 1748 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 1749 break; 1750 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 1751 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 1752 break; 1753 case AMDGPU_UCODE_ID_SMC: 1754 *type = GFX_FW_TYPE_SMU; 1755 break; 1756 case AMDGPU_UCODE_ID_UVD: 1757 *type = GFX_FW_TYPE_UVD; 1758 break; 1759 case AMDGPU_UCODE_ID_UVD1: 1760 *type = GFX_FW_TYPE_UVD1; 1761 break; 1762 case AMDGPU_UCODE_ID_VCE: 1763 *type = GFX_FW_TYPE_VCE; 1764 break; 1765 case AMDGPU_UCODE_ID_VCN: 1766 *type = GFX_FW_TYPE_VCN; 1767 break; 1768 case AMDGPU_UCODE_ID_VCN1: 1769 *type = GFX_FW_TYPE_VCN1; 1770 break; 1771 case AMDGPU_UCODE_ID_DMCU_ERAM: 1772 *type = GFX_FW_TYPE_DMCU_ERAM; 1773 break; 1774 case AMDGPU_UCODE_ID_DMCU_INTV: 1775 *type = GFX_FW_TYPE_DMCU_ISR; 1776 break; 1777 case AMDGPU_UCODE_ID_VCN0_RAM: 1778 *type = GFX_FW_TYPE_VCN0_RAM; 1779 break; 1780 case AMDGPU_UCODE_ID_VCN1_RAM: 1781 *type = GFX_FW_TYPE_VCN1_RAM; 1782 break; 1783 case AMDGPU_UCODE_ID_DMCUB: 1784 *type = GFX_FW_TYPE_DMUB; 1785 break; 1786 case AMDGPU_UCODE_ID_MAXIMUM: 1787 default: 1788 return -EINVAL; 1789 } 1790 1791 return 0; 1792 } 1793 1794 static void psp_print_fw_hdr(struct psp_context *psp, 1795 struct amdgpu_firmware_info *ucode) 1796 { 1797 struct amdgpu_device *adev = psp->adev; 1798 struct common_firmware_header *hdr; 1799 1800 switch (ucode->ucode_id) { 1801 case AMDGPU_UCODE_ID_SDMA0: 1802 case AMDGPU_UCODE_ID_SDMA1: 1803 case AMDGPU_UCODE_ID_SDMA2: 1804 case AMDGPU_UCODE_ID_SDMA3: 1805 case AMDGPU_UCODE_ID_SDMA4: 1806 case AMDGPU_UCODE_ID_SDMA5: 1807 case AMDGPU_UCODE_ID_SDMA6: 1808 case AMDGPU_UCODE_ID_SDMA7: 1809 hdr = (struct common_firmware_header *) 1810 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 1811 amdgpu_ucode_print_sdma_hdr(hdr); 1812 break; 1813 case AMDGPU_UCODE_ID_CP_CE: 1814 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 1815 amdgpu_ucode_print_gfx_hdr(hdr); 1816 break; 1817 case AMDGPU_UCODE_ID_CP_PFP: 1818 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 1819 amdgpu_ucode_print_gfx_hdr(hdr); 1820 break; 1821 case AMDGPU_UCODE_ID_CP_ME: 1822 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 1823 amdgpu_ucode_print_gfx_hdr(hdr); 1824 break; 1825 case AMDGPU_UCODE_ID_CP_MEC1: 1826 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 1827 amdgpu_ucode_print_gfx_hdr(hdr); 1828 break; 1829 case AMDGPU_UCODE_ID_RLC_G: 1830 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 1831 amdgpu_ucode_print_rlc_hdr(hdr); 1832 break; 1833 case AMDGPU_UCODE_ID_SMC: 1834 hdr = (struct common_firmware_header *)adev->pm.fw->data; 1835 amdgpu_ucode_print_smc_hdr(hdr); 1836 break; 1837 default: 1838 break; 1839 } 1840 } 1841 1842 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 1843 struct psp_gfx_cmd_resp *cmd) 1844 { 1845 int ret; 1846 uint64_t fw_mem_mc_addr = ucode->mc_addr; 1847 1848 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 1849 1850 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1851 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 1852 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 1853 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 1854 1855 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 1856 if (ret) 1857 DRM_ERROR("Unknown firmware type\n"); 1858 1859 return ret; 1860 } 1861 1862 static int psp_execute_np_fw_load(struct psp_context *psp, 1863 struct amdgpu_firmware_info *ucode) 1864 { 1865 int ret = 0; 1866 1867 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); 1868 if (ret) 1869 return ret; 1870 1871 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, 1872 psp->fence_buf_mc_addr); 1873 1874 return ret; 1875 } 1876 1877 static int psp_load_smu_fw(struct psp_context *psp) 1878 { 1879 int ret; 1880 struct amdgpu_device* adev = psp->adev; 1881 struct amdgpu_firmware_info *ucode = 1882 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 1883 struct amdgpu_ras *ras = psp->ras.ras; 1884 1885 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 1886 return 0; 1887 1888 1889 if (amdgpu_in_reset(adev) && ras && ras->supported) { 1890 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 1891 if (ret) { 1892 DRM_WARN("Failed to set MP1 state prepare for reload\n"); 1893 } 1894 } 1895 1896 ret = psp_execute_np_fw_load(psp, ucode); 1897 1898 if (ret) 1899 DRM_ERROR("PSP load smu failed!\n"); 1900 1901 return ret; 1902 } 1903 1904 static bool fw_load_skip_check(struct psp_context *psp, 1905 struct amdgpu_firmware_info *ucode) 1906 { 1907 if (!ucode->fw) 1908 return true; 1909 1910 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1911 (psp_smu_reload_quirk(psp) || 1912 psp->autoload_supported || 1913 psp->pmfw_centralized_cstate_management)) 1914 return true; 1915 1916 if (amdgpu_sriov_vf(psp->adev) && 1917 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 1918 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 1919 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 1920 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 1921 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 1922 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 1923 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 1924 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 1925 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G 1926 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL 1927 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM 1928 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM 1929 || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) 1930 /*skip ucode loading in SRIOV VF */ 1931 return true; 1932 1933 if (psp->autoload_supported && 1934 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 1935 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 1936 /* skip mec JT when autoload is enabled */ 1937 return true; 1938 1939 return false; 1940 } 1941 1942 static int psp_np_fw_load(struct psp_context *psp) 1943 { 1944 int i, ret; 1945 struct amdgpu_firmware_info *ucode; 1946 struct amdgpu_device* adev = psp->adev; 1947 1948 if (psp->autoload_supported && 1949 !psp->pmfw_centralized_cstate_management) { 1950 ret = psp_load_smu_fw(psp); 1951 if (ret) 1952 return ret; 1953 } 1954 1955 for (i = 0; i < adev->firmware.max_ucodes; i++) { 1956 ucode = &adev->firmware.ucode[i]; 1957 1958 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1959 !fw_load_skip_check(psp, ucode)) { 1960 ret = psp_load_smu_fw(psp); 1961 if (ret) 1962 return ret; 1963 continue; 1964 } 1965 1966 if (fw_load_skip_check(psp, ucode)) 1967 continue; 1968 1969 if (psp->autoload_supported && 1970 (adev->asic_type == CHIP_SIENNA_CICHLID || 1971 adev->asic_type == CHIP_NAVY_FLOUNDER) && 1972 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 1973 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 1974 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 1975 /* PSP only receive one SDMA fw for sienna_cichlid, 1976 * as all four sdma fw are same */ 1977 continue; 1978 1979 psp_print_fw_hdr(psp, ucode); 1980 1981 ret = psp_execute_np_fw_load(psp, ucode); 1982 if (ret) 1983 return ret; 1984 1985 /* Start rlc autoload after psp recieved all the gfx firmware */ 1986 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 1987 AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { 1988 ret = psp_rlc_autoload_start(psp); 1989 if (ret) { 1990 DRM_ERROR("Failed to start rlc autoload\n"); 1991 return ret; 1992 } 1993 } 1994 } 1995 1996 return 0; 1997 } 1998 1999 static int psp_load_fw(struct amdgpu_device *adev) 2000 { 2001 int ret; 2002 struct psp_context *psp = &adev->psp; 2003 2004 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2005 psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ 2006 goto skip_memalloc; 2007 } 2008 2009 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 2010 if (!psp->cmd) 2011 return -ENOMEM; 2012 2013 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 2014 AMDGPU_GEM_DOMAIN_GTT, 2015 &psp->fw_pri_bo, 2016 &psp->fw_pri_mc_addr, 2017 &psp->fw_pri_buf); 2018 if (ret) 2019 goto failed; 2020 2021 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 2022 AMDGPU_GEM_DOMAIN_VRAM, 2023 &psp->fence_buf_bo, 2024 &psp->fence_buf_mc_addr, 2025 &psp->fence_buf); 2026 if (ret) 2027 goto failed; 2028 2029 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 2030 AMDGPU_GEM_DOMAIN_VRAM, 2031 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 2032 (void **)&psp->cmd_buf_mem); 2033 if (ret) 2034 goto failed; 2035 2036 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2037 2038 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2039 if (ret) { 2040 DRM_ERROR("PSP ring init failed!\n"); 2041 goto failed; 2042 } 2043 2044 skip_memalloc: 2045 ret = psp_hw_start(psp); 2046 if (ret) 2047 goto failed; 2048 2049 ret = psp_np_fw_load(psp); 2050 if (ret) 2051 goto failed; 2052 2053 ret = psp_asd_load(psp); 2054 if (ret) { 2055 DRM_ERROR("PSP load asd failed!\n"); 2056 return ret; 2057 } 2058 2059 if (psp->adev->psp.ta_fw) { 2060 ret = psp_ras_initialize(psp); 2061 if (ret) 2062 dev_err(psp->adev->dev, 2063 "RAS: Failed to initialize RAS\n"); 2064 2065 ret = psp_hdcp_initialize(psp); 2066 if (ret) 2067 dev_err(psp->adev->dev, 2068 "HDCP: Failed to initialize HDCP\n"); 2069 2070 ret = psp_dtm_initialize(psp); 2071 if (ret) 2072 dev_err(psp->adev->dev, 2073 "DTM: Failed to initialize DTM\n"); 2074 2075 ret = psp_rap_initialize(psp); 2076 if (ret) 2077 dev_err(psp->adev->dev, 2078 "RAP: Failed to initialize RAP\n"); 2079 } 2080 2081 return 0; 2082 2083 failed: 2084 /* 2085 * all cleanup jobs (xgmi terminate, ras terminate, 2086 * ring destroy, cmd/fence/fw buffers destory, 2087 * psp->cmd destory) are delayed to psp_hw_fini 2088 */ 2089 return ret; 2090 } 2091 2092 static int psp_hw_init(void *handle) 2093 { 2094 int ret; 2095 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2096 2097 mutex_lock(&adev->firmware.mutex); 2098 /* 2099 * This sequence is just used on hw_init only once, no need on 2100 * resume. 2101 */ 2102 ret = amdgpu_ucode_init_bo(adev); 2103 if (ret) 2104 goto failed; 2105 2106 ret = psp_load_fw(adev); 2107 if (ret) { 2108 DRM_ERROR("PSP firmware loading failed\n"); 2109 goto failed; 2110 } 2111 2112 mutex_unlock(&adev->firmware.mutex); 2113 return 0; 2114 2115 failed: 2116 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2117 mutex_unlock(&adev->firmware.mutex); 2118 return -EINVAL; 2119 } 2120 2121 static int psp_hw_fini(void *handle) 2122 { 2123 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2124 struct psp_context *psp = &adev->psp; 2125 int ret; 2126 2127 if (psp->adev->psp.ta_fw) { 2128 psp_ras_terminate(psp); 2129 psp_rap_terminate(psp); 2130 psp_dtm_terminate(psp); 2131 psp_hdcp_terminate(psp); 2132 } 2133 2134 psp_asd_unload(psp); 2135 ret = psp_clear_vf_fw(psp); 2136 if (ret) { 2137 DRM_ERROR("PSP clear vf fw!\n"); 2138 return ret; 2139 } 2140 2141 psp_tmr_terminate(psp); 2142 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2143 2144 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 2145 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 2146 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 2147 &psp->fence_buf_mc_addr, &psp->fence_buf); 2148 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 2149 (void **)&psp->cmd_buf_mem); 2150 2151 kfree(psp->cmd); 2152 psp->cmd = NULL; 2153 2154 return 0; 2155 } 2156 2157 static int psp_suspend(void *handle) 2158 { 2159 int ret; 2160 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2161 struct psp_context *psp = &adev->psp; 2162 2163 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2164 psp->xgmi_context.initialized == 1) { 2165 ret = psp_xgmi_terminate(psp); 2166 if (ret) { 2167 DRM_ERROR("Failed to terminate xgmi ta\n"); 2168 return ret; 2169 } 2170 } 2171 2172 if (psp->adev->psp.ta_fw) { 2173 ret = psp_ras_terminate(psp); 2174 if (ret) { 2175 DRM_ERROR("Failed to terminate ras ta\n"); 2176 return ret; 2177 } 2178 ret = psp_hdcp_terminate(psp); 2179 if (ret) { 2180 DRM_ERROR("Failed to terminate hdcp ta\n"); 2181 return ret; 2182 } 2183 ret = psp_dtm_terminate(psp); 2184 if (ret) { 2185 DRM_ERROR("Failed to terminate dtm ta\n"); 2186 return ret; 2187 } 2188 ret = psp_rap_terminate(psp); 2189 if (ret) { 2190 DRM_ERROR("Failed to terminate rap ta\n"); 2191 return ret; 2192 } 2193 } 2194 2195 ret = psp_asd_unload(psp); 2196 if (ret) { 2197 DRM_ERROR("Failed to unload asd\n"); 2198 return ret; 2199 } 2200 2201 ret = psp_tmr_terminate(psp); 2202 if (ret) { 2203 DRM_ERROR("Failed to terminate tmr\n"); 2204 return ret; 2205 } 2206 2207 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2208 if (ret) { 2209 DRM_ERROR("PSP ring stop failed\n"); 2210 return ret; 2211 } 2212 2213 return 0; 2214 } 2215 2216 static int psp_resume(void *handle) 2217 { 2218 int ret; 2219 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2220 struct psp_context *psp = &adev->psp; 2221 2222 DRM_INFO("PSP is resuming...\n"); 2223 2224 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2225 if (ret) { 2226 DRM_ERROR("Failed to process memory training!\n"); 2227 return ret; 2228 } 2229 2230 mutex_lock(&adev->firmware.mutex); 2231 2232 ret = psp_hw_start(psp); 2233 if (ret) 2234 goto failed; 2235 2236 ret = psp_np_fw_load(psp); 2237 if (ret) 2238 goto failed; 2239 2240 ret = psp_asd_load(psp); 2241 if (ret) { 2242 DRM_ERROR("PSP load asd failed!\n"); 2243 goto failed; 2244 } 2245 2246 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2247 ret = psp_xgmi_initialize(psp); 2248 /* Warning the XGMI seesion initialize failure 2249 * Instead of stop driver initialization 2250 */ 2251 if (ret) 2252 dev_err(psp->adev->dev, 2253 "XGMI: Failed to initialize XGMI session\n"); 2254 } 2255 2256 if (psp->adev->psp.ta_fw) { 2257 ret = psp_ras_initialize(psp); 2258 if (ret) 2259 dev_err(psp->adev->dev, 2260 "RAS: Failed to initialize RAS\n"); 2261 2262 ret = psp_hdcp_initialize(psp); 2263 if (ret) 2264 dev_err(psp->adev->dev, 2265 "HDCP: Failed to initialize HDCP\n"); 2266 2267 ret = psp_dtm_initialize(psp); 2268 if (ret) 2269 dev_err(psp->adev->dev, 2270 "DTM: Failed to initialize DTM\n"); 2271 2272 ret = psp_rap_initialize(psp); 2273 if (ret) 2274 dev_err(psp->adev->dev, 2275 "RAP: Failed to initialize RAP\n"); 2276 } 2277 2278 mutex_unlock(&adev->firmware.mutex); 2279 2280 return 0; 2281 2282 failed: 2283 DRM_ERROR("PSP resume failed\n"); 2284 mutex_unlock(&adev->firmware.mutex); 2285 return ret; 2286 } 2287 2288 int psp_gpu_reset(struct amdgpu_device *adev) 2289 { 2290 int ret; 2291 2292 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 2293 return 0; 2294 2295 mutex_lock(&adev->psp.mutex); 2296 ret = psp_mode1_reset(&adev->psp); 2297 mutex_unlock(&adev->psp.mutex); 2298 2299 return ret; 2300 } 2301 2302 int psp_rlc_autoload_start(struct psp_context *psp) 2303 { 2304 int ret; 2305 struct psp_gfx_cmd_resp *cmd; 2306 2307 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 2308 if (!cmd) 2309 return -ENOMEM; 2310 2311 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 2312 2313 ret = psp_cmd_submit_buf(psp, NULL, cmd, 2314 psp->fence_buf_mc_addr); 2315 kfree(cmd); 2316 return ret; 2317 } 2318 2319 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, 2320 uint64_t cmd_gpu_addr, int cmd_size) 2321 { 2322 struct amdgpu_firmware_info ucode = {0}; 2323 2324 ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : 2325 AMDGPU_UCODE_ID_VCN0_RAM; 2326 ucode.mc_addr = cmd_gpu_addr; 2327 ucode.ucode_size = cmd_size; 2328 2329 return psp_execute_np_fw_load(&adev->psp, &ucode); 2330 } 2331 2332 int psp_ring_cmd_submit(struct psp_context *psp, 2333 uint64_t cmd_buf_mc_addr, 2334 uint64_t fence_mc_addr, 2335 int index) 2336 { 2337 unsigned int psp_write_ptr_reg = 0; 2338 struct psp_gfx_rb_frame *write_frame; 2339 struct psp_ring *ring = &psp->km_ring; 2340 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 2341 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 2342 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 2343 struct amdgpu_device *adev = psp->adev; 2344 uint32_t ring_size_dw = ring->ring_size / 4; 2345 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 2346 2347 /* KM (GPCOM) prepare write pointer */ 2348 psp_write_ptr_reg = psp_ring_get_wptr(psp); 2349 2350 /* Update KM RB frame pointer to new frame */ 2351 /* write_frame ptr increments by size of rb_frame in bytes */ 2352 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 2353 if ((psp_write_ptr_reg % ring_size_dw) == 0) 2354 write_frame = ring_buffer_start; 2355 else 2356 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 2357 /* Check invalid write_frame ptr address */ 2358 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 2359 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 2360 ring_buffer_start, ring_buffer_end, write_frame); 2361 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 2362 return -EINVAL; 2363 } 2364 2365 /* Initialize KM RB frame */ 2366 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 2367 2368 /* Update KM RB frame */ 2369 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 2370 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 2371 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 2372 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 2373 write_frame->fence_value = index; 2374 amdgpu_asic_flush_hdp(adev, NULL); 2375 2376 /* Update the write Pointer in DWORDs */ 2377 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 2378 psp_ring_set_wptr(psp, psp_write_ptr_reg); 2379 return 0; 2380 } 2381 2382 int psp_init_asd_microcode(struct psp_context *psp, 2383 const char *chip_name) 2384 { 2385 struct amdgpu_device *adev = psp->adev; 2386 char fw_name[30]; 2387 const struct psp_firmware_header_v1_0 *asd_hdr; 2388 int err = 0; 2389 2390 if (!chip_name) { 2391 dev_err(adev->dev, "invalid chip name for asd microcode\n"); 2392 return -EINVAL; 2393 } 2394 2395 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 2396 err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); 2397 if (err) 2398 goto out; 2399 2400 err = amdgpu_ucode_validate(adev->psp.asd_fw); 2401 if (err) 2402 goto out; 2403 2404 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 2405 adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 2406 adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); 2407 adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 2408 adev->psp.asd_start_addr = (uint8_t *)asd_hdr + 2409 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 2410 return 0; 2411 out: 2412 dev_err(adev->dev, "fail to initialize asd microcode\n"); 2413 release_firmware(adev->psp.asd_fw); 2414 adev->psp.asd_fw = NULL; 2415 return err; 2416 } 2417 2418 int psp_init_sos_microcode(struct psp_context *psp, 2419 const char *chip_name) 2420 { 2421 struct amdgpu_device *adev = psp->adev; 2422 char fw_name[30]; 2423 const struct psp_firmware_header_v1_0 *sos_hdr; 2424 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 2425 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 2426 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 2427 int err = 0; 2428 2429 if (!chip_name) { 2430 dev_err(adev->dev, "invalid chip name for sos microcode\n"); 2431 return -EINVAL; 2432 } 2433 2434 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 2435 err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev); 2436 if (err) 2437 goto out; 2438 2439 err = amdgpu_ucode_validate(adev->psp.sos_fw); 2440 if (err) 2441 goto out; 2442 2443 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 2444 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 2445 2446 switch (sos_hdr->header.header_version_major) { 2447 case 1: 2448 adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 2449 adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); 2450 adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); 2451 adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); 2452 adev->psp.sys_start_addr = (uint8_t *)sos_hdr + 2453 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 2454 adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2455 le32_to_cpu(sos_hdr->sos_offset_bytes); 2456 if (sos_hdr->header.header_version_minor == 1) { 2457 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 2458 adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); 2459 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2460 le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); 2461 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); 2462 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2463 le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); 2464 } 2465 if (sos_hdr->header.header_version_minor == 2) { 2466 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 2467 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); 2468 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2469 le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); 2470 } 2471 if (sos_hdr->header.header_version_minor == 3) { 2472 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 2473 adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes); 2474 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2475 le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes); 2476 adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes); 2477 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2478 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes); 2479 adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes); 2480 adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr + 2481 le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes); 2482 } 2483 break; 2484 default: 2485 dev_err(adev->dev, 2486 "unsupported psp sos firmware\n"); 2487 err = -EINVAL; 2488 goto out; 2489 } 2490 2491 return 0; 2492 out: 2493 dev_err(adev->dev, 2494 "failed to init sos firmware\n"); 2495 release_firmware(adev->psp.sos_fw); 2496 adev->psp.sos_fw = NULL; 2497 2498 return err; 2499 } 2500 2501 int parse_ta_bin_descriptor(struct psp_context *psp, 2502 const struct ta_fw_bin_desc *desc, 2503 const struct ta_firmware_header_v2_0 *ta_hdr) 2504 { 2505 uint8_t *ucode_start_addr = NULL; 2506 2507 if (!psp || !desc || !ta_hdr) 2508 return -EINVAL; 2509 2510 ucode_start_addr = (uint8_t *)ta_hdr + 2511 le32_to_cpu(desc->offset_bytes) + 2512 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 2513 2514 switch (desc->fw_type) { 2515 case TA_FW_TYPE_PSP_ASD: 2516 psp->asd_fw_version = le32_to_cpu(desc->fw_version); 2517 psp->asd_feature_version = le32_to_cpu(desc->fw_version); 2518 psp->asd_ucode_size = le32_to_cpu(desc->size_bytes); 2519 psp->asd_start_addr = ucode_start_addr; 2520 break; 2521 case TA_FW_TYPE_PSP_XGMI: 2522 psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version); 2523 psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes); 2524 psp->ta_xgmi_start_addr = ucode_start_addr; 2525 break; 2526 case TA_FW_TYPE_PSP_RAS: 2527 psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version); 2528 psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes); 2529 psp->ta_ras_start_addr = ucode_start_addr; 2530 break; 2531 case TA_FW_TYPE_PSP_HDCP: 2532 psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version); 2533 psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes); 2534 psp->ta_hdcp_start_addr = ucode_start_addr; 2535 break; 2536 case TA_FW_TYPE_PSP_DTM: 2537 psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version); 2538 psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes); 2539 psp->ta_dtm_start_addr = ucode_start_addr; 2540 break; 2541 case TA_FW_TYPE_PSP_RAP: 2542 psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version); 2543 psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes); 2544 psp->ta_rap_start_addr = ucode_start_addr; 2545 break; 2546 default: 2547 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 2548 break; 2549 } 2550 2551 return 0; 2552 } 2553 2554 int psp_init_ta_microcode(struct psp_context *psp, 2555 const char *chip_name) 2556 { 2557 struct amdgpu_device *adev = psp->adev; 2558 char fw_name[30]; 2559 const struct ta_firmware_header_v2_0 *ta_hdr; 2560 int err = 0; 2561 int ta_index = 0; 2562 2563 if (!chip_name) { 2564 dev_err(adev->dev, "invalid chip name for ta microcode\n"); 2565 return -EINVAL; 2566 } 2567 2568 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 2569 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 2570 if (err) 2571 goto out; 2572 2573 err = amdgpu_ucode_validate(adev->psp.ta_fw); 2574 if (err) 2575 goto out; 2576 2577 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 2578 2579 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) { 2580 dev_err(adev->dev, "unsupported TA header version\n"); 2581 err = -EINVAL; 2582 goto out; 2583 } 2584 2585 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) { 2586 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 2587 err = -EINVAL; 2588 goto out; 2589 } 2590 2591 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 2592 err = parse_ta_bin_descriptor(psp, 2593 &ta_hdr->ta_fw_bin[ta_index], 2594 ta_hdr); 2595 if (err) 2596 goto out; 2597 } 2598 2599 return 0; 2600 out: 2601 dev_err(adev->dev, "fail to initialize ta microcode\n"); 2602 release_firmware(adev->psp.ta_fw); 2603 adev->psp.ta_fw = NULL; 2604 return err; 2605 } 2606 2607 static int psp_set_clockgating_state(void *handle, 2608 enum amd_clockgating_state state) 2609 { 2610 return 0; 2611 } 2612 2613 static int psp_set_powergating_state(void *handle, 2614 enum amd_powergating_state state) 2615 { 2616 return 0; 2617 } 2618 2619 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 2620 struct device_attribute *attr, 2621 char *buf) 2622 { 2623 struct drm_device *ddev = dev_get_drvdata(dev); 2624 struct amdgpu_device *adev = drm_to_adev(ddev); 2625 uint32_t fw_ver; 2626 int ret; 2627 2628 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2629 DRM_INFO("PSP block is not ready yet."); 2630 return -EBUSY; 2631 } 2632 2633 mutex_lock(&adev->psp.mutex); 2634 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 2635 mutex_unlock(&adev->psp.mutex); 2636 2637 if (ret) { 2638 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 2639 return ret; 2640 } 2641 2642 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 2643 } 2644 2645 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 2646 struct device_attribute *attr, 2647 const char *buf, 2648 size_t count) 2649 { 2650 struct drm_device *ddev = dev_get_drvdata(dev); 2651 struct amdgpu_device *adev = drm_to_adev(ddev); 2652 void *cpu_addr; 2653 dma_addr_t dma_addr; 2654 int ret; 2655 char fw_name[100]; 2656 const struct firmware *usbc_pd_fw; 2657 2658 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2659 DRM_INFO("PSP block is not ready yet."); 2660 return -EBUSY; 2661 } 2662 2663 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 2664 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 2665 if (ret) 2666 goto fail; 2667 2668 /* We need contiguous physical mem to place the FW for psp to access */ 2669 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 2670 2671 ret = dma_mapping_error(adev->dev, dma_addr); 2672 if (ret) 2673 goto rel_buf; 2674 2675 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 2676 2677 /* 2678 * x86 specific workaround. 2679 * Without it the buffer is invisible in PSP. 2680 * 2681 * TODO Remove once PSP starts snooping CPU cache 2682 */ 2683 #ifdef CONFIG_X86 2684 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 2685 #endif 2686 2687 mutex_lock(&adev->psp.mutex); 2688 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 2689 mutex_unlock(&adev->psp.mutex); 2690 2691 rel_buf: 2692 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 2693 release_firmware(usbc_pd_fw); 2694 2695 fail: 2696 if (ret) { 2697 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 2698 return ret; 2699 } 2700 2701 return count; 2702 } 2703 2704 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 2705 psp_usbc_pd_fw_sysfs_read, 2706 psp_usbc_pd_fw_sysfs_write); 2707 2708 2709 2710 const struct amd_ip_funcs psp_ip_funcs = { 2711 .name = "psp", 2712 .early_init = psp_early_init, 2713 .late_init = NULL, 2714 .sw_init = psp_sw_init, 2715 .sw_fini = psp_sw_fini, 2716 .hw_init = psp_hw_init, 2717 .hw_fini = psp_hw_fini, 2718 .suspend = psp_suspend, 2719 .resume = psp_resume, 2720 .is_idle = NULL, 2721 .check_soft_reset = NULL, 2722 .wait_for_idle = NULL, 2723 .soft_reset = NULL, 2724 .set_clockgating_state = psp_set_clockgating_state, 2725 .set_powergating_state = psp_set_powergating_state, 2726 }; 2727 2728 static int psp_sysfs_init(struct amdgpu_device *adev) 2729 { 2730 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 2731 2732 if (ret) 2733 DRM_ERROR("Failed to create USBC PD FW control file!"); 2734 2735 return ret; 2736 } 2737 2738 static void psp_sysfs_fini(struct amdgpu_device *adev) 2739 { 2740 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 2741 } 2742 2743 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 2744 { 2745 .type = AMD_IP_BLOCK_TYPE_PSP, 2746 .major = 3, 2747 .minor = 1, 2748 .rev = 0, 2749 .funcs = &psp_ip_funcs, 2750 }; 2751 2752 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 2753 { 2754 .type = AMD_IP_BLOCK_TYPE_PSP, 2755 .major = 10, 2756 .minor = 0, 2757 .rev = 0, 2758 .funcs = &psp_ip_funcs, 2759 }; 2760 2761 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 2762 { 2763 .type = AMD_IP_BLOCK_TYPE_PSP, 2764 .major = 11, 2765 .minor = 0, 2766 .rev = 0, 2767 .funcs = &psp_ip_funcs, 2768 }; 2769 2770 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 2771 { 2772 .type = AMD_IP_BLOCK_TYPE_PSP, 2773 .major = 12, 2774 .minor = 0, 2775 .rev = 0, 2776 .funcs = &psp_ip_funcs, 2777 }; 2778