1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <linux/dma-mapping.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "soc15_common.h" 33 #include "psp_v3_1.h" 34 #include "psp_v10_0.h" 35 #include "psp_v11_0.h" 36 #include "psp_v12_0.h" 37 38 #include "amdgpu_ras.h" 39 40 static int psp_sysfs_init(struct amdgpu_device *adev); 41 static void psp_sysfs_fini(struct amdgpu_device *adev); 42 43 static int psp_load_smu_fw(struct psp_context *psp); 44 45 /* 46 * Due to DF Cstate management centralized to PMFW, the firmware 47 * loading sequence will be updated as below: 48 * - Load KDB 49 * - Load SYS_DRV 50 * - Load tOS 51 * - Load PMFW 52 * - Setup TMR 53 * - Load other non-psp fw 54 * - Load ASD 55 * - Load XGMI/RAS/HDCP/DTM TA if any 56 * 57 * This new sequence is required for 58 * - Arcturus 59 * - Navi12 and onwards 60 */ 61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 62 { 63 struct amdgpu_device *adev = psp->adev; 64 65 psp->pmfw_centralized_cstate_management = false; 66 67 if (amdgpu_sriov_vf(adev)) 68 return; 69 70 if (adev->flags & AMD_IS_APU) 71 return; 72 73 if ((adev->asic_type == CHIP_ARCTURUS) || 74 (adev->asic_type >= CHIP_NAVI12)) 75 psp->pmfw_centralized_cstate_management = true; 76 } 77 78 static int psp_early_init(void *handle) 79 { 80 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct psp_context *psp = &adev->psp; 82 83 switch (adev->asic_type) { 84 case CHIP_VEGA10: 85 case CHIP_VEGA12: 86 psp_v3_1_set_psp_funcs(psp); 87 psp->autoload_supported = false; 88 break; 89 case CHIP_RAVEN: 90 psp_v10_0_set_psp_funcs(psp); 91 psp->autoload_supported = false; 92 break; 93 case CHIP_VEGA20: 94 case CHIP_ARCTURUS: 95 psp_v11_0_set_psp_funcs(psp); 96 psp->autoload_supported = false; 97 break; 98 case CHIP_NAVI10: 99 case CHIP_NAVI14: 100 case CHIP_NAVI12: 101 case CHIP_SIENNA_CICHLID: 102 case CHIP_NAVY_FLOUNDER: 103 psp_v11_0_set_psp_funcs(psp); 104 psp->autoload_supported = true; 105 break; 106 case CHIP_RENOIR: 107 psp_v12_0_set_psp_funcs(psp); 108 break; 109 default: 110 return -EINVAL; 111 } 112 113 psp->adev = adev; 114 115 psp_check_pmfw_centralized_cstate_management(psp); 116 117 return 0; 118 } 119 120 static void psp_memory_training_fini(struct psp_context *psp) 121 { 122 struct 
psp_memory_training_context *ctx = &psp->mem_train_ctx; 123 124 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 125 kfree(ctx->sys_cache); 126 ctx->sys_cache = NULL; 127 } 128 129 static int psp_memory_training_init(struct psp_context *psp) 130 { 131 int ret; 132 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 133 134 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 135 DRM_DEBUG("memory training is not supported!\n"); 136 return 0; 137 } 138 139 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 140 if (ctx->sys_cache == NULL) { 141 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); 142 ret = -ENOMEM; 143 goto Err_out; 144 } 145 146 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 147 ctx->train_data_size, 148 ctx->p2c_train_data_offset, 149 ctx->c2p_train_data_offset); 150 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 151 return 0; 152 153 Err_out: 154 psp_memory_training_fini(psp); 155 return ret; 156 } 157 158 static int psp_sw_init(void *handle) 159 { 160 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 161 struct psp_context *psp = &adev->psp; 162 int ret; 163 164 ret = psp_init_microcode(psp); 165 if (ret) { 166 DRM_ERROR("Failed to load psp firmware!\n"); 167 return ret; 168 } 169 170 ret = psp_memory_training_init(psp); 171 if (ret) { 172 DRM_ERROR("Failed to initialize memory training!\n"); 173 return ret; 174 } 175 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 176 if (ret) { 177 DRM_ERROR("Failed to process memory training!\n"); 178 return ret; 179 } 180 181 if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) { 182 ret= psp_sysfs_init(adev); 183 if (ret) { 184 return ret; 185 } 186 } 187 188 return 0; 189 } 190 191 static int psp_sw_fini(void *handle) 192 { 193 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 194 195 psp_memory_training_fini(&adev->psp); 196 if (adev->psp.sos_fw) { 197 release_firmware(adev->psp.sos_fw); 198 adev->psp.sos_fw = NULL; 199 } 200 if (adev->psp.asd_fw) { 201 release_firmware(adev->psp.asd_fw); 202 adev->psp.asd_fw = NULL; 203 } 204 if (adev->psp.ta_fw) { 205 release_firmware(adev->psp.ta_fw); 206 adev->psp.ta_fw = NULL; 207 } 208 209 if (adev->asic_type == CHIP_NAVI10) 210 psp_sysfs_fini(adev); 211 212 return 0; 213 } 214 215 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 216 uint32_t reg_val, uint32_t mask, bool check_changed) 217 { 218 uint32_t val; 219 int i; 220 struct amdgpu_device *adev = psp->adev; 221 222 if (psp->adev->in_pci_err_recovery) 223 return 0; 224 225 for (i = 0; i < adev->usec_timeout; i++) { 226 val = RREG32(reg_index); 227 if (check_changed) { 228 if (val != reg_val) 229 return 0; 230 } else { 231 if ((val & mask) == reg_val) 232 return 0; 233 } 234 udelay(1); 235 } 236 237 return -ETIME; 238 } 239 240 static int 241 psp_cmd_submit_buf(struct psp_context *psp, 242 struct amdgpu_firmware_info *ucode, 243 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 244 { 245 int ret; 246 int index; 247 int timeout = 2000; 248 bool ras_intr = false; 249 bool skip_unsupport = false; 250 251 if (psp->adev->in_pci_err_recovery) 252 return 0; 253 254 mutex_lock(&psp->mutex); 255 256 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 257 258 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 259 260 index = atomic_inc_return(&psp->fence_value); 261 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 262 if (ret) { 263 atomic_dec(&psp->fence_value); 264 mutex_unlock(&psp->mutex); 265 
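	/*
	 * Ring submission failed: the fence index bumped above has already
	 * been rolled back with atomic_dec() and the mutex released, so just
	 * propagate the error without polling the fence buffer.
	 */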
return ret; 266 } 267 268 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 269 while (*((unsigned int *)psp->fence_buf) != index) { 270 if (--timeout == 0) 271 break; 272 /* 273 * Shouldn't wait for timeout when err_event_athub occurs, 274 * because gpu reset thread triggered and lock resource should 275 * be released for psp resume sequence. 276 */ 277 ras_intr = amdgpu_ras_intr_triggered(); 278 if (ras_intr) 279 break; 280 msleep(1); 281 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 282 } 283 284 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 285 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 286 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 287 288 /* In some cases, psp response status is not 0 even there is no 289 * problem while the command is submitted. Some version of PSP FW 290 * doesn't write 0 to that field. 291 * So here we would like to only print a warning instead of an error 292 * during psp initialization to avoid breaking hw_init and it doesn't 293 * return -EINVAL. 294 */ 295 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 296 if (ucode) 297 DRM_WARN("failed to load ucode id (%d) ", 298 ucode->ucode_id); 299 DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", 300 psp->cmd_buf_mem->cmd_id, 301 psp->cmd_buf_mem->resp.status); 302 if (!timeout) { 303 mutex_unlock(&psp->mutex); 304 return -EINVAL; 305 } 306 } 307 308 /* get xGMI session id from response buffer */ 309 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; 310 311 if (ucode) { 312 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 313 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 314 } 315 mutex_unlock(&psp->mutex); 316 317 return ret; 318 } 319 320 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 321 struct psp_gfx_cmd_resp *cmd, 322 uint64_t tmr_mc, uint32_t size) 323 { 324 if (amdgpu_sriov_vf(psp->adev)) 325 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 326 else 327 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 328 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 329 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 330 cmd->cmd.cmd_setup_tmr.buf_size = size; 331 } 332 333 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 334 uint64_t pri_buf_mc, uint32_t size) 335 { 336 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 337 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 338 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 339 cmd->cmd.cmd_load_toc.toc_size = size; 340 } 341 342 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 343 static int psp_load_toc(struct psp_context *psp, 344 uint32_t *tmr_size) 345 { 346 int ret; 347 struct psp_gfx_cmd_resp *cmd; 348 349 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 350 if (!cmd) 351 return -ENOMEM; 352 /* Copy toc to psp firmware private buffer */ 353 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 354 memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); 355 356 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); 357 358 ret = psp_cmd_submit_buf(psp, NULL, cmd, 359 psp->fence_buf_mc_addr); 360 if (!ret) 361 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 362 kfree(cmd); 363 return ret; 364 } 365 366 /* Set up Trusted Memory Region */ 367 static int psp_tmr_init(struct psp_context *psp) 368 { 369 int ret; 370 int tmr_size; 371 void *tmr_buf; 372 void **pptr; 373 374 
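	/*
	 * tmr_size starts at the default PSP_TMR_SIZE and, when a TOC is
	 * available, is overwritten below with the size PSP reports in the
	 * LOAD_TOC response (see psp_load_toc()).
	 */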
/* 375 * According to HW engineer, they prefer the TMR address be "naturally 376 * aligned" , e.g. the start address be an integer divide of TMR size. 377 * 378 * Note: this memory need be reserved till the driver 379 * uninitializes. 380 */ 381 tmr_size = PSP_TMR_SIZE; 382 383 /* For ASICs support RLC autoload, psp will parse the toc 384 * and calculate the total size of TMR needed */ 385 if (!amdgpu_sriov_vf(psp->adev) && 386 psp->toc_start_addr && 387 psp->toc_bin_size && 388 psp->fw_pri_buf) { 389 ret = psp_load_toc(psp, &tmr_size); 390 if (ret) { 391 DRM_ERROR("Failed to load toc\n"); 392 return ret; 393 } 394 } 395 396 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 397 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, 398 AMDGPU_GEM_DOMAIN_VRAM, 399 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 400 401 return ret; 402 } 403 404 static int psp_clear_vf_fw(struct psp_context *psp) 405 { 406 int ret; 407 struct psp_gfx_cmd_resp *cmd; 408 409 if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12) 410 return 0; 411 412 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 413 if (!cmd) 414 return -ENOMEM; 415 416 cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW; 417 418 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 419 kfree(cmd); 420 421 return ret; 422 } 423 424 static bool psp_skip_tmr(struct psp_context *psp) 425 { 426 switch (psp->adev->asic_type) { 427 case CHIP_NAVI12: 428 case CHIP_SIENNA_CICHLID: 429 return true; 430 default: 431 return false; 432 } 433 } 434 435 static int psp_tmr_load(struct psp_context *psp) 436 { 437 int ret; 438 struct psp_gfx_cmd_resp *cmd; 439 440 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 441 * Already set up by host driver. 442 */ 443 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 444 return 0; 445 446 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 447 if (!cmd) 448 return -ENOMEM; 449 450 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 451 amdgpu_bo_size(psp->tmr_bo)); 452 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 453 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 454 455 ret = psp_cmd_submit_buf(psp, NULL, cmd, 456 psp->fence_buf_mc_addr); 457 458 kfree(cmd); 459 460 return ret; 461 } 462 463 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 464 struct psp_gfx_cmd_resp *cmd) 465 { 466 if (amdgpu_sriov_vf(psp->adev)) 467 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 468 else 469 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 470 } 471 472 static int psp_tmr_unload(struct psp_context *psp) 473 { 474 int ret; 475 struct psp_gfx_cmd_resp *cmd; 476 477 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 478 if (!cmd) 479 return -ENOMEM; 480 481 psp_prep_tmr_unload_cmd_buf(psp, cmd); 482 DRM_INFO("free PSP TMR buffer\n"); 483 484 ret = psp_cmd_submit_buf(psp, NULL, cmd, 485 psp->fence_buf_mc_addr); 486 487 kfree(cmd); 488 489 return ret; 490 } 491 492 static int psp_tmr_terminate(struct psp_context *psp) 493 { 494 int ret; 495 void *tmr_buf; 496 void **pptr; 497 498 ret = psp_tmr_unload(psp); 499 if (ret) 500 return ret; 501 502 /* free TMR memory buffer */ 503 pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; 504 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 505 506 return 0; 507 } 508 509 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 510 uint64_t asd_mc, uint32_t size) 511 { 512 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; 513 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); 514 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); 515 cmd->cmd.cmd_load_ta.app_len = size; 516 517 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; 518 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; 519 cmd->cmd.cmd_load_ta.cmd_buf_len = 0; 520 } 521 522 static int psp_asd_load(struct psp_context *psp) 523 { 524 int ret; 525 struct psp_gfx_cmd_resp *cmd; 526 527 /* If PSP version doesn't match ASD version, asd loading will be failed. 528 * add workaround to bypass it for sriov now. 529 * TODO: add version check to make it common 530 */ 531 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw) 532 return 0; 533 534 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 535 if (!cmd) 536 return -ENOMEM; 537 538 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 539 memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); 540 541 psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 542 psp->asd_ucode_size); 543 544 ret = psp_cmd_submit_buf(psp, NULL, cmd, 545 psp->fence_buf_mc_addr); 546 if (!ret) { 547 psp->asd_context.asd_initialized = true; 548 psp->asd_context.session_id = cmd->resp.session_id; 549 } 550 551 kfree(cmd); 552 553 return ret; 554 } 555 556 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 557 uint32_t session_id) 558 { 559 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 560 cmd->cmd.cmd_unload_ta.session_id = session_id; 561 } 562 563 static int psp_asd_unload(struct psp_context *psp) 564 { 565 int ret; 566 struct psp_gfx_cmd_resp *cmd; 567 568 if (amdgpu_sriov_vf(psp->adev)) 569 return 0; 570 571 if (!psp->asd_context.asd_initialized) 572 return 0; 573 574 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 575 if (!cmd) 576 return -ENOMEM; 577 578 psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); 579 580 ret = psp_cmd_submit_buf(psp, NULL, cmd, 581 psp->fence_buf_mc_addr); 582 if (!ret) 583 psp->asd_context.asd_initialized = false; 584 585 kfree(cmd); 586 587 return ret; 588 } 589 590 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 591 uint32_t id, uint32_t value) 592 { 593 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 594 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 595 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 596 } 597 598 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 599 uint32_t value) 600 { 601 struct psp_gfx_cmd_resp *cmd = NULL; 602 int ret = 0; 603 604 if (reg >= PSP_REG_LAST) 605 return -EINVAL; 606 607 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 608 if (!cmd) 609 return -ENOMEM; 610 611 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 612 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 613 614 kfree(cmd); 615 return ret; 616 } 617 618 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 619 uint64_t ta_bin_mc, 620 uint32_t ta_bin_size, 621 uint64_t ta_shared_mc, 622 uint32_t ta_shared_size) 623 { 624 cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 625 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 626 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 627 cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 628 629 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 630 
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 631 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 632 } 633 634 static int psp_xgmi_init_shared_buf(struct psp_context *psp) 635 { 636 int ret; 637 638 /* 639 * Allocate 16k memory aligned to 4k from Frame Buffer (local 640 * physical) for xgmi ta <-> Driver 641 */ 642 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, 643 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 644 &psp->xgmi_context.xgmi_shared_bo, 645 &psp->xgmi_context.xgmi_shared_mc_addr, 646 &psp->xgmi_context.xgmi_shared_buf); 647 648 return ret; 649 } 650 651 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 652 uint32_t ta_cmd_id, 653 uint32_t session_id) 654 { 655 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 656 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 657 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 658 } 659 660 static int psp_ta_invoke(struct psp_context *psp, 661 uint32_t ta_cmd_id, 662 uint32_t session_id) 663 { 664 int ret; 665 struct psp_gfx_cmd_resp *cmd; 666 667 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 668 if (!cmd) 669 return -ENOMEM; 670 671 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 672 673 ret = psp_cmd_submit_buf(psp, NULL, cmd, 674 psp->fence_buf_mc_addr); 675 676 kfree(cmd); 677 678 return ret; 679 } 680 681 static int psp_xgmi_load(struct psp_context *psp) 682 { 683 int ret; 684 struct psp_gfx_cmd_resp *cmd; 685 686 /* 687 * TODO: bypass the loading in sriov for now 688 */ 689 690 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 691 if (!cmd) 692 return -ENOMEM; 693 694 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 695 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 696 697 psp_prep_ta_load_cmd_buf(cmd, 698 psp->fw_pri_mc_addr, 699 psp->ta_xgmi_ucode_size, 700 psp->xgmi_context.xgmi_shared_mc_addr, 701 PSP_XGMI_SHARED_MEM_SIZE); 702 703 ret = psp_cmd_submit_buf(psp, NULL, cmd, 704 psp->fence_buf_mc_addr); 705 706 if (!ret) { 707 psp->xgmi_context.initialized = 1; 708 psp->xgmi_context.session_id = cmd->resp.session_id; 709 } 710 711 kfree(cmd); 712 713 return ret; 714 } 715 716 static int psp_xgmi_unload(struct psp_context *psp) 717 { 718 int ret; 719 struct psp_gfx_cmd_resp *cmd; 720 struct amdgpu_device *adev = psp->adev; 721 722 /* XGMI TA unload currently is not supported on Arcturus */ 723 if (adev->asic_type == CHIP_ARCTURUS) 724 return 0; 725 726 /* 727 * TODO: bypass the unloading in sriov for now 728 */ 729 730 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 731 if (!cmd) 732 return -ENOMEM; 733 734 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 735 736 ret = psp_cmd_submit_buf(psp, NULL, cmd, 737 psp->fence_buf_mc_addr); 738 739 kfree(cmd); 740 741 return ret; 742 } 743 744 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 745 { 746 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 747 } 748 749 int psp_xgmi_terminate(struct psp_context *psp) 750 { 751 int ret; 752 753 if (!psp->xgmi_context.initialized) 754 return 0; 755 756 ret = psp_xgmi_unload(psp); 757 if (ret) 758 return ret; 759 760 psp->xgmi_context.initialized = 0; 761 762 /* free xgmi shared memory */ 763 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, 764 &psp->xgmi_context.xgmi_shared_mc_addr, 765 &psp->xgmi_context.xgmi_shared_buf); 766 767 return 0; 768 } 769 770 int psp_xgmi_initialize(struct psp_context *psp) 771 { 772 struct ta_xgmi_shared_memory *xgmi_cmd; 773 int ret; 774 775 if 
(!psp->adev->psp.ta_fw || 776 !psp->adev->psp.ta_xgmi_ucode_size || 777 !psp->adev->psp.ta_xgmi_start_addr) 778 return -ENOENT; 779 780 if (!psp->xgmi_context.initialized) { 781 ret = psp_xgmi_init_shared_buf(psp); 782 if (ret) 783 return ret; 784 } 785 786 /* Load XGMI TA */ 787 ret = psp_xgmi_load(psp); 788 if (ret) 789 return ret; 790 791 /* Initialize XGMI session */ 792 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); 793 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 794 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 795 796 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 797 798 return ret; 799 } 800 801 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 802 { 803 struct ta_xgmi_shared_memory *xgmi_cmd; 804 int ret; 805 806 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 807 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 808 809 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 810 811 /* Invoke xgmi ta to get hive id */ 812 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 813 if (ret) 814 return ret; 815 816 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 817 818 return 0; 819 } 820 821 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 822 { 823 struct ta_xgmi_shared_memory *xgmi_cmd; 824 int ret; 825 826 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 827 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 828 829 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 830 831 /* Invoke xgmi ta to get the node id */ 832 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 833 if (ret) 834 return ret; 835 836 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 837 838 return 0; 839 } 840 841 int psp_xgmi_get_topology_info(struct psp_context *psp, 842 int number_devices, 843 struct psp_xgmi_topology_info *topology) 844 { 845 struct ta_xgmi_shared_memory *xgmi_cmd; 846 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 847 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 848 int i; 849 int ret; 850 851 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 852 return -EINVAL; 853 854 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 855 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 856 857 /* Fill in the shared memory with topology information as input */ 858 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 859 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; 860 topology_info_input->num_nodes = number_devices; 861 862 for (i = 0; i < topology_info_input->num_nodes; i++) { 863 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 864 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 865 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 866 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 867 } 868 869 /* Invoke xgmi ta to get the topology information */ 870 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); 871 if (ret) 872 return ret; 873 874 /* Read the output topology information from the shared memory */ 875 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 876 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 877 for (i = 0; i < topology->num_nodes; i++) { 878 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 879 
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 880 topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; 881 topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; 882 } 883 884 return 0; 885 } 886 887 int psp_xgmi_set_topology_info(struct psp_context *psp, 888 int number_devices, 889 struct psp_xgmi_topology_info *topology) 890 { 891 struct ta_xgmi_shared_memory *xgmi_cmd; 892 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 893 int i; 894 895 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 896 return -EINVAL; 897 898 xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf; 899 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 900 901 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 902 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 903 topology_info_input->num_nodes = number_devices; 904 905 for (i = 0; i < topology_info_input->num_nodes; i++) { 906 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 907 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 908 topology_info_input->nodes[i].is_sharing_enabled = 1; 909 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 910 } 911 912 /* Invoke xgmi ta to set topology information */ 913 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 914 } 915 916 // ras begin 917 static int psp_ras_init_shared_buf(struct psp_context *psp) 918 { 919 int ret; 920 921 /* 922 * Allocate 16k memory aligned to 4k from Frame Buffer (local 923 * physical) for ras ta <-> Driver 924 */ 925 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, 926 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 927 &psp->ras.ras_shared_bo, 928 &psp->ras.ras_shared_mc_addr, 929 &psp->ras.ras_shared_buf); 930 931 return ret; 932 } 933 934 static int psp_ras_load(struct psp_context *psp) 935 { 936 int ret; 937 struct psp_gfx_cmd_resp *cmd; 938 struct ta_ras_shared_memory *ras_cmd; 939 940 /* 941 * TODO: bypass the loading in sriov for now 942 */ 943 if (amdgpu_sriov_vf(psp->adev)) 944 return 0; 945 946 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 947 if (!cmd) 948 return -ENOMEM; 949 950 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 951 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 952 953 psp_prep_ta_load_cmd_buf(cmd, 954 psp->fw_pri_mc_addr, 955 psp->ta_ras_ucode_size, 956 psp->ras.ras_shared_mc_addr, 957 PSP_RAS_SHARED_MEM_SIZE); 958 959 ret = psp_cmd_submit_buf(psp, NULL, cmd, 960 psp->fence_buf_mc_addr); 961 962 ras_cmd = (struct ta_ras_shared_memory*)psp->ras.ras_shared_buf; 963 964 if (!ret) { 965 psp->ras.session_id = cmd->resp.session_id; 966 967 if (!ras_cmd->ras_status) 968 psp->ras.ras_initialized = true; 969 else 970 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 971 } 972 973 if (ret || ras_cmd->ras_status) 974 amdgpu_ras_fini(psp->adev); 975 976 kfree(cmd); 977 978 return ret; 979 } 980 981 static int psp_ras_unload(struct psp_context *psp) 982 { 983 int ret; 984 struct psp_gfx_cmd_resp *cmd; 985 986 /* 987 * TODO: bypass the unloading in sriov for now 988 */ 989 if (amdgpu_sriov_vf(psp->adev)) 990 return 0; 991 992 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 993 if (!cmd) 994 return -ENOMEM; 995 996 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 997 998 ret = psp_cmd_submit_buf(psp, NULL, cmd, 999 psp->fence_buf_mc_addr); 1000 1001 kfree(cmd); 
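	/*
	 * The RAS context itself (ras_initialized flag and the shared buffer)
	 * is torn down by the caller, psp_ras_terminate(), once this unload
	 * succeeds.
	 */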
1002 1003 return ret; 1004 } 1005 1006 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1007 { 1008 struct ta_ras_shared_memory *ras_cmd; 1009 int ret; 1010 1011 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1012 1013 /* 1014 * TODO: bypass the loading in sriov for now 1015 */ 1016 if (amdgpu_sriov_vf(psp->adev)) 1017 return 0; 1018 1019 ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 1020 1021 if (amdgpu_ras_intr_triggered()) 1022 return ret; 1023 1024 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) 1025 { 1026 DRM_WARN("RAS: Unsupported Interface"); 1027 return -EINVAL; 1028 } 1029 1030 if (!ret) { 1031 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1032 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1033 1034 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1035 } 1036 else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1037 dev_warn(psp->adev->dev, 1038 "RAS internal register access blocked\n"); 1039 } 1040 1041 return ret; 1042 } 1043 1044 int psp_ras_enable_features(struct psp_context *psp, 1045 union ta_ras_cmd_input *info, bool enable) 1046 { 1047 struct ta_ras_shared_memory *ras_cmd; 1048 int ret; 1049 1050 if (!psp->ras.ras_initialized) 1051 return -EINVAL; 1052 1053 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1054 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1055 1056 if (enable) 1057 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 1058 else 1059 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 1060 1061 ras_cmd->ras_in_message = *info; 1062 1063 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1064 if (ret) 1065 return -EINVAL; 1066 1067 return ras_cmd->ras_status; 1068 } 1069 1070 static int psp_ras_terminate(struct psp_context *psp) 1071 { 1072 int ret; 1073 1074 /* 1075 * TODO: bypass the terminate in sriov for now 1076 */ 1077 if (amdgpu_sriov_vf(psp->adev)) 1078 return 0; 1079 1080 if (!psp->ras.ras_initialized) 1081 return 0; 1082 1083 ret = psp_ras_unload(psp); 1084 if (ret) 1085 return ret; 1086 1087 psp->ras.ras_initialized = false; 1088 1089 /* free ras shared memory */ 1090 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, 1091 &psp->ras.ras_shared_mc_addr, 1092 &psp->ras.ras_shared_buf); 1093 1094 return 0; 1095 } 1096 1097 static int psp_ras_initialize(struct psp_context *psp) 1098 { 1099 int ret; 1100 1101 /* 1102 * TODO: bypass the initialize in sriov for now 1103 */ 1104 if (amdgpu_sriov_vf(psp->adev)) 1105 return 0; 1106 1107 if (!psp->adev->psp.ta_ras_ucode_size || 1108 !psp->adev->psp.ta_ras_start_addr) { 1109 dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); 1110 return 0; 1111 } 1112 1113 if (!psp->ras.ras_initialized) { 1114 ret = psp_ras_init_shared_buf(psp); 1115 if (ret) 1116 return ret; 1117 } 1118 1119 ret = psp_ras_load(psp); 1120 if (ret) 1121 return ret; 1122 1123 return 0; 1124 } 1125 1126 int psp_ras_trigger_error(struct psp_context *psp, 1127 struct ta_ras_trigger_error_input *info) 1128 { 1129 struct ta_ras_shared_memory *ras_cmd; 1130 int ret; 1131 1132 if (!psp->ras.ras_initialized) 1133 return -EINVAL; 1134 1135 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 1136 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1137 1138 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; 1139 ras_cmd->ras_in_message.trigger_error = *info; 1140 1141 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1142 if (ret) 1143 return -EINVAL; 1144 1145 /* If err_event_athub occurs error inject was 
successful, however 1146 return status from TA is no long reliable */ 1147 if (amdgpu_ras_intr_triggered()) 1148 return 0; 1149 1150 return ras_cmd->ras_status; 1151 } 1152 // ras end 1153 1154 // HDCP start 1155 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 1156 { 1157 int ret; 1158 1159 /* 1160 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1161 * physical) for hdcp ta <-> Driver 1162 */ 1163 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, 1164 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1165 &psp->hdcp_context.hdcp_shared_bo, 1166 &psp->hdcp_context.hdcp_shared_mc_addr, 1167 &psp->hdcp_context.hdcp_shared_buf); 1168 1169 return ret; 1170 } 1171 1172 static int psp_hdcp_load(struct psp_context *psp) 1173 { 1174 int ret; 1175 struct psp_gfx_cmd_resp *cmd; 1176 1177 /* 1178 * TODO: bypass the loading in sriov for now 1179 */ 1180 if (amdgpu_sriov_vf(psp->adev)) 1181 return 0; 1182 1183 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1184 if (!cmd) 1185 return -ENOMEM; 1186 1187 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1188 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 1189 psp->ta_hdcp_ucode_size); 1190 1191 psp_prep_ta_load_cmd_buf(cmd, 1192 psp->fw_pri_mc_addr, 1193 psp->ta_hdcp_ucode_size, 1194 psp->hdcp_context.hdcp_shared_mc_addr, 1195 PSP_HDCP_SHARED_MEM_SIZE); 1196 1197 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1198 1199 if (!ret) { 1200 psp->hdcp_context.hdcp_initialized = true; 1201 psp->hdcp_context.session_id = cmd->resp.session_id; 1202 mutex_init(&psp->hdcp_context.mutex); 1203 } 1204 1205 kfree(cmd); 1206 1207 return ret; 1208 } 1209 static int psp_hdcp_initialize(struct psp_context *psp) 1210 { 1211 int ret; 1212 1213 /* 1214 * TODO: bypass the initialize in sriov for now 1215 */ 1216 if (amdgpu_sriov_vf(psp->adev)) 1217 return 0; 1218 1219 if (!psp->adev->psp.ta_hdcp_ucode_size || 1220 !psp->adev->psp.ta_hdcp_start_addr) { 1221 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1222 return 0; 1223 } 1224 1225 if (!psp->hdcp_context.hdcp_initialized) { 1226 ret = psp_hdcp_init_shared_buf(psp); 1227 if (ret) 1228 return ret; 1229 } 1230 1231 ret = psp_hdcp_load(psp); 1232 if (ret) 1233 return ret; 1234 1235 return 0; 1236 } 1237 1238 static int psp_hdcp_unload(struct psp_context *psp) 1239 { 1240 int ret; 1241 struct psp_gfx_cmd_resp *cmd; 1242 1243 /* 1244 * TODO: bypass the unloading in sriov for now 1245 */ 1246 if (amdgpu_sriov_vf(psp->adev)) 1247 return 0; 1248 1249 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1250 if (!cmd) 1251 return -ENOMEM; 1252 1253 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 1254 1255 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1256 1257 kfree(cmd); 1258 1259 return ret; 1260 } 1261 1262 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1263 { 1264 /* 1265 * TODO: bypass the loading in sriov for now 1266 */ 1267 if (amdgpu_sriov_vf(psp->adev)) 1268 return 0; 1269 1270 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 1271 } 1272 1273 static int psp_hdcp_terminate(struct psp_context *psp) 1274 { 1275 int ret; 1276 1277 /* 1278 * TODO: bypass the terminate in sriov for now 1279 */ 1280 if (amdgpu_sriov_vf(psp->adev)) 1281 return 0; 1282 1283 if (!psp->hdcp_context.hdcp_initialized) 1284 return 0; 1285 1286 ret = psp_hdcp_unload(psp); 1287 if (ret) 1288 return ret; 1289 1290 psp->hdcp_context.hdcp_initialized = false; 1291 1292 /* free hdcp shared memory */ 1293 
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, 1294 &psp->hdcp_context.hdcp_shared_mc_addr, 1295 &psp->hdcp_context.hdcp_shared_buf); 1296 1297 return 0; 1298 } 1299 // HDCP end 1300 1301 // DTM start 1302 static int psp_dtm_init_shared_buf(struct psp_context *psp) 1303 { 1304 int ret; 1305 1306 /* 1307 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1308 * physical) for dtm ta <-> Driver 1309 */ 1310 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, 1311 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1312 &psp->dtm_context.dtm_shared_bo, 1313 &psp->dtm_context.dtm_shared_mc_addr, 1314 &psp->dtm_context.dtm_shared_buf); 1315 1316 return ret; 1317 } 1318 1319 static int psp_dtm_load(struct psp_context *psp) 1320 { 1321 int ret; 1322 struct psp_gfx_cmd_resp *cmd; 1323 1324 /* 1325 * TODO: bypass the loading in sriov for now 1326 */ 1327 if (amdgpu_sriov_vf(psp->adev)) 1328 return 0; 1329 1330 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1331 if (!cmd) 1332 return -ENOMEM; 1333 1334 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1335 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 1336 1337 psp_prep_ta_load_cmd_buf(cmd, 1338 psp->fw_pri_mc_addr, 1339 psp->ta_dtm_ucode_size, 1340 psp->dtm_context.dtm_shared_mc_addr, 1341 PSP_DTM_SHARED_MEM_SIZE); 1342 1343 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1344 1345 if (!ret) { 1346 psp->dtm_context.dtm_initialized = true; 1347 psp->dtm_context.session_id = cmd->resp.session_id; 1348 mutex_init(&psp->dtm_context.mutex); 1349 } 1350 1351 kfree(cmd); 1352 1353 return ret; 1354 } 1355 1356 static int psp_dtm_initialize(struct psp_context *psp) 1357 { 1358 int ret; 1359 1360 /* 1361 * TODO: bypass the initialize in sriov for now 1362 */ 1363 if (amdgpu_sriov_vf(psp->adev)) 1364 return 0; 1365 1366 if (!psp->adev->psp.ta_dtm_ucode_size || 1367 !psp->adev->psp.ta_dtm_start_addr) { 1368 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1369 return 0; 1370 } 1371 1372 if (!psp->dtm_context.dtm_initialized) { 1373 ret = psp_dtm_init_shared_buf(psp); 1374 if (ret) 1375 return ret; 1376 } 1377 1378 ret = psp_dtm_load(psp); 1379 if (ret) 1380 return ret; 1381 1382 return 0; 1383 } 1384 1385 static int psp_dtm_unload(struct psp_context *psp) 1386 { 1387 int ret; 1388 struct psp_gfx_cmd_resp *cmd; 1389 1390 /* 1391 * TODO: bypass the unloading in sriov for now 1392 */ 1393 if (amdgpu_sriov_vf(psp->adev)) 1394 return 0; 1395 1396 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1397 if (!cmd) 1398 return -ENOMEM; 1399 1400 psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); 1401 1402 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1403 1404 kfree(cmd); 1405 1406 return ret; 1407 } 1408 1409 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1410 { 1411 /* 1412 * TODO: bypass the loading in sriov for now 1413 */ 1414 if (amdgpu_sriov_vf(psp->adev)) 1415 return 0; 1416 1417 return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); 1418 } 1419 1420 static int psp_dtm_terminate(struct psp_context *psp) 1421 { 1422 int ret; 1423 1424 /* 1425 * TODO: bypass the terminate in sriov for now 1426 */ 1427 if (amdgpu_sriov_vf(psp->adev)) 1428 return 0; 1429 1430 if (!psp->dtm_context.dtm_initialized) 1431 return 0; 1432 1433 ret = psp_dtm_unload(psp); 1434 if (ret) 1435 return ret; 1436 1437 psp->dtm_context.dtm_initialized = false; 1438 1439 /* free hdcp shared memory */ 1440 
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, 1441 &psp->dtm_context.dtm_shared_mc_addr, 1442 &psp->dtm_context.dtm_shared_buf); 1443 1444 return 0; 1445 } 1446 // DTM end 1447 1448 // RAP start 1449 static int psp_rap_init_shared_buf(struct psp_context *psp) 1450 { 1451 int ret; 1452 1453 /* 1454 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1455 * physical) for rap ta <-> Driver 1456 */ 1457 ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE, 1458 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1459 &psp->rap_context.rap_shared_bo, 1460 &psp->rap_context.rap_shared_mc_addr, 1461 &psp->rap_context.rap_shared_buf); 1462 1463 return ret; 1464 } 1465 1466 static int psp_rap_load(struct psp_context *psp) 1467 { 1468 int ret; 1469 struct psp_gfx_cmd_resp *cmd; 1470 1471 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1472 if (!cmd) 1473 return -ENOMEM; 1474 1475 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1476 memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size); 1477 1478 psp_prep_ta_load_cmd_buf(cmd, 1479 psp->fw_pri_mc_addr, 1480 psp->ta_rap_ucode_size, 1481 psp->rap_context.rap_shared_mc_addr, 1482 PSP_RAP_SHARED_MEM_SIZE); 1483 1484 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1485 1486 if (!ret) { 1487 psp->rap_context.rap_initialized = true; 1488 psp->rap_context.session_id = cmd->resp.session_id; 1489 mutex_init(&psp->rap_context.mutex); 1490 } 1491 1492 kfree(cmd); 1493 1494 return ret; 1495 } 1496 1497 static int psp_rap_unload(struct psp_context *psp) 1498 { 1499 int ret; 1500 struct psp_gfx_cmd_resp *cmd; 1501 1502 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1503 if (!cmd) 1504 return -ENOMEM; 1505 1506 psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id); 1507 1508 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1509 1510 kfree(cmd); 1511 1512 return ret; 1513 } 1514 1515 static int psp_rap_initialize(struct psp_context *psp) 1516 { 1517 int ret; 1518 1519 /* 1520 * TODO: bypass the initialize in sriov for now 1521 */ 1522 if (amdgpu_sriov_vf(psp->adev)) 1523 return 0; 1524 1525 if (!psp->adev->psp.ta_rap_ucode_size || 1526 !psp->adev->psp.ta_rap_start_addr) { 1527 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1528 return 0; 1529 } 1530 1531 if (!psp->rap_context.rap_initialized) { 1532 ret = psp_rap_init_shared_buf(psp); 1533 if (ret) 1534 return ret; 1535 } 1536 1537 ret = psp_rap_load(psp); 1538 if (ret) 1539 return ret; 1540 1541 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE); 1542 if (ret != TA_RAP_STATUS__SUCCESS) { 1543 psp_rap_unload(psp); 1544 1545 amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, 1546 &psp->rap_context.rap_shared_mc_addr, 1547 &psp->rap_context.rap_shared_buf); 1548 1549 psp->rap_context.rap_initialized = false; 1550 1551 dev_warn(psp->adev->dev, "RAP TA initialize fail.\n"); 1552 return -EINVAL; 1553 } 1554 1555 return 0; 1556 } 1557 1558 static int psp_rap_terminate(struct psp_context *psp) 1559 { 1560 int ret; 1561 1562 if (!psp->rap_context.rap_initialized) 1563 return 0; 1564 1565 ret = psp_rap_unload(psp); 1566 1567 psp->rap_context.rap_initialized = false; 1568 1569 /* free rap shared memory */ 1570 amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo, 1571 &psp->rap_context.rap_shared_mc_addr, 1572 &psp->rap_context.rap_shared_buf); 1573 1574 return ret; 1575 } 1576 1577 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1578 { 1579 struct ta_rap_shared_memory *rap_cmd; 1580 int ret; 
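	/*
	 * Only TA_CMD_RAP__INITIALIZE and TA_CMD_RAP__VALIDATE_L0 are accepted
	 * here; access to the shared buffer is serialized by rap_context.mutex
	 * and, on success, the TA's rap_status (not just the PSP transport
	 * status) is returned to the caller.
	 */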
1581 1582 if (!psp->rap_context.rap_initialized) 1583 return -EINVAL; 1584 1585 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 1586 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 1587 return -EINVAL; 1588 1589 mutex_lock(&psp->rap_context.mutex); 1590 1591 rap_cmd = (struct ta_rap_shared_memory *) 1592 psp->rap_context.rap_shared_buf; 1593 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 1594 1595 rap_cmd->cmd_id = ta_cmd_id; 1596 rap_cmd->validation_method_id = METHOD_A; 1597 1598 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id); 1599 if (ret) { 1600 mutex_unlock(&psp->rap_context.mutex); 1601 return ret; 1602 } 1603 1604 mutex_unlock(&psp->rap_context.mutex); 1605 1606 return rap_cmd->rap_status; 1607 } 1608 // RAP end 1609 1610 static int psp_hw_start(struct psp_context *psp) 1611 { 1612 struct amdgpu_device *adev = psp->adev; 1613 int ret; 1614 1615 if (!amdgpu_sriov_vf(adev)) { 1616 if (psp->kdb_bin_size && 1617 (psp->funcs->bootloader_load_kdb != NULL)) { 1618 ret = psp_bootloader_load_kdb(psp); 1619 if (ret) { 1620 DRM_ERROR("PSP load kdb failed!\n"); 1621 return ret; 1622 } 1623 } 1624 1625 if (psp->spl_bin_size) { 1626 ret = psp_bootloader_load_spl(psp); 1627 if (ret) { 1628 DRM_ERROR("PSP load spl failed!\n"); 1629 return ret; 1630 } 1631 } 1632 1633 ret = psp_bootloader_load_sysdrv(psp); 1634 if (ret) { 1635 DRM_ERROR("PSP load sysdrv failed!\n"); 1636 return ret; 1637 } 1638 1639 ret = psp_bootloader_load_sos(psp); 1640 if (ret) { 1641 DRM_ERROR("PSP load sos failed!\n"); 1642 return ret; 1643 } 1644 } 1645 1646 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 1647 if (ret) { 1648 DRM_ERROR("PSP create ring failed!\n"); 1649 return ret; 1650 } 1651 1652 ret = psp_clear_vf_fw(psp); 1653 if (ret) { 1654 DRM_ERROR("PSP clear vf fw!\n"); 1655 return ret; 1656 } 1657 1658 ret = psp_tmr_init(psp); 1659 if (ret) { 1660 DRM_ERROR("PSP tmr init failed!\n"); 1661 return ret; 1662 } 1663 1664 /* 1665 * For ASICs with DF Cstate management centralized 1666 * to PMFW, TMR setup should be performed after PMFW 1667 * loaded and before other non-psp firmware loaded. 
1668 */ 1669 if (psp->pmfw_centralized_cstate_management) { 1670 ret = psp_load_smu_fw(psp); 1671 if (ret) 1672 return ret; 1673 } 1674 1675 ret = psp_tmr_load(psp); 1676 if (ret) { 1677 DRM_ERROR("PSP load tmr failed!\n"); 1678 return ret; 1679 } 1680 1681 return 0; 1682 } 1683 1684 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 1685 enum psp_gfx_fw_type *type) 1686 { 1687 switch (ucode->ucode_id) { 1688 case AMDGPU_UCODE_ID_SDMA0: 1689 *type = GFX_FW_TYPE_SDMA0; 1690 break; 1691 case AMDGPU_UCODE_ID_SDMA1: 1692 *type = GFX_FW_TYPE_SDMA1; 1693 break; 1694 case AMDGPU_UCODE_ID_SDMA2: 1695 *type = GFX_FW_TYPE_SDMA2; 1696 break; 1697 case AMDGPU_UCODE_ID_SDMA3: 1698 *type = GFX_FW_TYPE_SDMA3; 1699 break; 1700 case AMDGPU_UCODE_ID_SDMA4: 1701 *type = GFX_FW_TYPE_SDMA4; 1702 break; 1703 case AMDGPU_UCODE_ID_SDMA5: 1704 *type = GFX_FW_TYPE_SDMA5; 1705 break; 1706 case AMDGPU_UCODE_ID_SDMA6: 1707 *type = GFX_FW_TYPE_SDMA6; 1708 break; 1709 case AMDGPU_UCODE_ID_SDMA7: 1710 *type = GFX_FW_TYPE_SDMA7; 1711 break; 1712 case AMDGPU_UCODE_ID_CP_MES: 1713 *type = GFX_FW_TYPE_CP_MES; 1714 break; 1715 case AMDGPU_UCODE_ID_CP_MES_DATA: 1716 *type = GFX_FW_TYPE_MES_STACK; 1717 break; 1718 case AMDGPU_UCODE_ID_CP_CE: 1719 *type = GFX_FW_TYPE_CP_CE; 1720 break; 1721 case AMDGPU_UCODE_ID_CP_PFP: 1722 *type = GFX_FW_TYPE_CP_PFP; 1723 break; 1724 case AMDGPU_UCODE_ID_CP_ME: 1725 *type = GFX_FW_TYPE_CP_ME; 1726 break; 1727 case AMDGPU_UCODE_ID_CP_MEC1: 1728 *type = GFX_FW_TYPE_CP_MEC; 1729 break; 1730 case AMDGPU_UCODE_ID_CP_MEC1_JT: 1731 *type = GFX_FW_TYPE_CP_MEC_ME1; 1732 break; 1733 case AMDGPU_UCODE_ID_CP_MEC2: 1734 *type = GFX_FW_TYPE_CP_MEC; 1735 break; 1736 case AMDGPU_UCODE_ID_CP_MEC2_JT: 1737 *type = GFX_FW_TYPE_CP_MEC_ME2; 1738 break; 1739 case AMDGPU_UCODE_ID_RLC_G: 1740 *type = GFX_FW_TYPE_RLC_G; 1741 break; 1742 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 1743 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 1744 break; 1745 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 1746 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 1747 break; 1748 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 1749 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 1750 break; 1751 case AMDGPU_UCODE_ID_SMC: 1752 *type = GFX_FW_TYPE_SMU; 1753 break; 1754 case AMDGPU_UCODE_ID_UVD: 1755 *type = GFX_FW_TYPE_UVD; 1756 break; 1757 case AMDGPU_UCODE_ID_UVD1: 1758 *type = GFX_FW_TYPE_UVD1; 1759 break; 1760 case AMDGPU_UCODE_ID_VCE: 1761 *type = GFX_FW_TYPE_VCE; 1762 break; 1763 case AMDGPU_UCODE_ID_VCN: 1764 *type = GFX_FW_TYPE_VCN; 1765 break; 1766 case AMDGPU_UCODE_ID_VCN1: 1767 *type = GFX_FW_TYPE_VCN1; 1768 break; 1769 case AMDGPU_UCODE_ID_DMCU_ERAM: 1770 *type = GFX_FW_TYPE_DMCU_ERAM; 1771 break; 1772 case AMDGPU_UCODE_ID_DMCU_INTV: 1773 *type = GFX_FW_TYPE_DMCU_ISR; 1774 break; 1775 case AMDGPU_UCODE_ID_VCN0_RAM: 1776 *type = GFX_FW_TYPE_VCN0_RAM; 1777 break; 1778 case AMDGPU_UCODE_ID_VCN1_RAM: 1779 *type = GFX_FW_TYPE_VCN1_RAM; 1780 break; 1781 case AMDGPU_UCODE_ID_DMCUB: 1782 *type = GFX_FW_TYPE_DMUB; 1783 break; 1784 case AMDGPU_UCODE_ID_MAXIMUM: 1785 default: 1786 return -EINVAL; 1787 } 1788 1789 return 0; 1790 } 1791 1792 static void psp_print_fw_hdr(struct psp_context *psp, 1793 struct amdgpu_firmware_info *ucode) 1794 { 1795 struct amdgpu_device *adev = psp->adev; 1796 struct common_firmware_header *hdr; 1797 1798 switch (ucode->ucode_id) { 1799 case AMDGPU_UCODE_ID_SDMA0: 1800 case AMDGPU_UCODE_ID_SDMA1: 1801 case AMDGPU_UCODE_ID_SDMA2: 1802 case AMDGPU_UCODE_ID_SDMA3: 1803 case AMDGPU_UCODE_ID_SDMA4: 1804 
case AMDGPU_UCODE_ID_SDMA5: 1805 case AMDGPU_UCODE_ID_SDMA6: 1806 case AMDGPU_UCODE_ID_SDMA7: 1807 hdr = (struct common_firmware_header *) 1808 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 1809 amdgpu_ucode_print_sdma_hdr(hdr); 1810 break; 1811 case AMDGPU_UCODE_ID_CP_CE: 1812 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 1813 amdgpu_ucode_print_gfx_hdr(hdr); 1814 break; 1815 case AMDGPU_UCODE_ID_CP_PFP: 1816 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 1817 amdgpu_ucode_print_gfx_hdr(hdr); 1818 break; 1819 case AMDGPU_UCODE_ID_CP_ME: 1820 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 1821 amdgpu_ucode_print_gfx_hdr(hdr); 1822 break; 1823 case AMDGPU_UCODE_ID_CP_MEC1: 1824 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 1825 amdgpu_ucode_print_gfx_hdr(hdr); 1826 break; 1827 case AMDGPU_UCODE_ID_RLC_G: 1828 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 1829 amdgpu_ucode_print_rlc_hdr(hdr); 1830 break; 1831 case AMDGPU_UCODE_ID_SMC: 1832 hdr = (struct common_firmware_header *)adev->pm.fw->data; 1833 amdgpu_ucode_print_smc_hdr(hdr); 1834 break; 1835 default: 1836 break; 1837 } 1838 } 1839 1840 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 1841 struct psp_gfx_cmd_resp *cmd) 1842 { 1843 int ret; 1844 uint64_t fw_mem_mc_addr = ucode->mc_addr; 1845 1846 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 1847 1848 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1849 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 1850 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 1851 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 1852 1853 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 1854 if (ret) 1855 DRM_ERROR("Unknown firmware type\n"); 1856 1857 return ret; 1858 } 1859 1860 static int psp_execute_np_fw_load(struct psp_context *psp, 1861 struct amdgpu_firmware_info *ucode) 1862 { 1863 int ret = 0; 1864 1865 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); 1866 if (ret) 1867 return ret; 1868 1869 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, 1870 psp->fence_buf_mc_addr); 1871 1872 return ret; 1873 } 1874 1875 static int psp_load_smu_fw(struct psp_context *psp) 1876 { 1877 int ret; 1878 struct amdgpu_device* adev = psp->adev; 1879 struct amdgpu_firmware_info *ucode = 1880 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 1881 struct amdgpu_ras *ras = psp->ras.ras; 1882 1883 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 1884 return 0; 1885 1886 1887 if (amdgpu_in_reset(adev) && ras && ras->supported) { 1888 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 1889 if (ret) { 1890 DRM_WARN("Failed to set MP1 state prepare for reload\n"); 1891 } 1892 } 1893 1894 ret = psp_execute_np_fw_load(psp, ucode); 1895 1896 if (ret) 1897 DRM_ERROR("PSP load smu failed!\n"); 1898 1899 return ret; 1900 } 1901 1902 static bool fw_load_skip_check(struct psp_context *psp, 1903 struct amdgpu_firmware_info *ucode) 1904 { 1905 if (!ucode->fw) 1906 return true; 1907 1908 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1909 (psp_smu_reload_quirk(psp) || 1910 psp->autoload_supported || 1911 psp->pmfw_centralized_cstate_management)) 1912 return true; 1913 1914 if (amdgpu_sriov_vf(psp->adev) && 1915 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 1916 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 1917 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 1918 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 1919 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 1920 || 
	    ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
	    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
		/* skip ucode loading in SRIOV VF */
		return true;

	if (psp->autoload_supported &&
	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
		/* skip mec JT when autoload is enabled */
		return true;

	return false;
}

static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	if (psp->autoload_supported &&
	    !psp->pmfw_centralized_cstate_management) {
		ret = psp_load_smu_fw(psp);
		if (ret)
			return ret;
	}

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    !fw_load_skip_check(psp, ucode)) {
			ret = psp_load_smu_fw(psp);
			if (ret)
				return ret;
			continue;
		}

		if (fw_load_skip_check(psp, ucode))
			continue;

		if (psp->autoload_supported &&
		    (adev->asic_type == CHIP_SIENNA_CICHLID ||
		     adev->asic_type == CHIP_NAVY_FLOUNDER) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA firmware for sienna_cichlid,
			 * as all four SDMA instances use the same firmware */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start RLC autoload after PSP has received all the gfx firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");
	}

	return 0;

failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffer destroy,
	 * psp->cmd destroy) are deferred to psp_hw_fini.
	 */
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is used only once, during hw_init; it is not needed
	 * on resume.
2099 */ 2100 ret = amdgpu_ucode_init_bo(adev); 2101 if (ret) 2102 goto failed; 2103 2104 ret = psp_load_fw(adev); 2105 if (ret) { 2106 DRM_ERROR("PSP firmware loading failed\n"); 2107 goto failed; 2108 } 2109 2110 mutex_unlock(&adev->firmware.mutex); 2111 return 0; 2112 2113 failed: 2114 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2115 mutex_unlock(&adev->firmware.mutex); 2116 return -EINVAL; 2117 } 2118 2119 static int psp_hw_fini(void *handle) 2120 { 2121 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2122 struct psp_context *psp = &adev->psp; 2123 int ret; 2124 2125 if (psp->adev->psp.ta_fw) { 2126 psp_ras_terminate(psp); 2127 psp_rap_terminate(psp); 2128 psp_dtm_terminate(psp); 2129 psp_hdcp_terminate(psp); 2130 } 2131 2132 psp_asd_unload(psp); 2133 ret = psp_clear_vf_fw(psp); 2134 if (ret) { 2135 DRM_ERROR("PSP clear vf fw!\n"); 2136 return ret; 2137 } 2138 2139 psp_tmr_terminate(psp); 2140 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2141 2142 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 2143 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 2144 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 2145 &psp->fence_buf_mc_addr, &psp->fence_buf); 2146 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 2147 (void **)&psp->cmd_buf_mem); 2148 2149 kfree(psp->cmd); 2150 psp->cmd = NULL; 2151 2152 return 0; 2153 } 2154 2155 static int psp_suspend(void *handle) 2156 { 2157 int ret; 2158 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2159 struct psp_context *psp = &adev->psp; 2160 2161 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2162 psp->xgmi_context.initialized == 1) { 2163 ret = psp_xgmi_terminate(psp); 2164 if (ret) { 2165 DRM_ERROR("Failed to terminate xgmi ta\n"); 2166 return ret; 2167 } 2168 } 2169 2170 if (psp->adev->psp.ta_fw) { 2171 ret = psp_ras_terminate(psp); 2172 if (ret) { 2173 DRM_ERROR("Failed to terminate ras ta\n"); 2174 return ret; 2175 } 2176 ret = psp_hdcp_terminate(psp); 2177 if (ret) { 2178 DRM_ERROR("Failed to terminate hdcp ta\n"); 2179 return ret; 2180 } 2181 ret = psp_dtm_terminate(psp); 2182 if (ret) { 2183 DRM_ERROR("Failed to terminate dtm ta\n"); 2184 return ret; 2185 } 2186 ret = psp_rap_terminate(psp); 2187 if (ret) { 2188 DRM_ERROR("Failed to terminate rap ta\n"); 2189 return ret; 2190 } 2191 } 2192 2193 ret = psp_asd_unload(psp); 2194 if (ret) { 2195 DRM_ERROR("Failed to unload asd\n"); 2196 return ret; 2197 } 2198 2199 ret = psp_tmr_terminate(psp); 2200 if (ret) { 2201 DRM_ERROR("Failed to terminate tmr\n"); 2202 return ret; 2203 } 2204 2205 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2206 if (ret) { 2207 DRM_ERROR("PSP ring stop failed\n"); 2208 return ret; 2209 } 2210 2211 return 0; 2212 } 2213 2214 static int psp_resume(void *handle) 2215 { 2216 int ret; 2217 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2218 struct psp_context *psp = &adev->psp; 2219 2220 DRM_INFO("PSP is resuming...\n"); 2221 2222 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2223 if (ret) { 2224 DRM_ERROR("Failed to process memory training!\n"); 2225 return ret; 2226 } 2227 2228 mutex_lock(&adev->firmware.mutex); 2229 2230 ret = psp_hw_start(psp); 2231 if (ret) 2232 goto failed; 2233 2234 ret = psp_np_fw_load(psp); 2235 if (ret) 2236 goto failed; 2237 2238 ret = psp_asd_load(psp); 2239 if (ret) { 2240 DRM_ERROR("PSP load asd failed!\n"); 2241 goto failed; 2242 } 2243 2244 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2245 ret = psp_xgmi_initialize(psp); 2246 /* Warning the XGMI seesion initialize failure 2247 * Instead of stop driver 
                if (ret)
                        dev_err(psp->adev->dev,
                                "XGMI: Failed to initialize XGMI session\n");
        }

        if (psp->adev->psp.ta_fw) {
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
                                "RAS: Failed to initialize RAS\n");

                ret = psp_hdcp_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
                                "HDCP: Failed to initialize HDCP\n");

                ret = psp_dtm_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
                                "DTM: Failed to initialize DTM\n");

                ret = psp_rap_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
                                "RAP: Failed to initialize RAP\n");
        }

        mutex_unlock(&adev->firmware.mutex);

        return 0;

failed:
        DRM_ERROR("PSP resume failed\n");
        mutex_unlock(&adev->firmware.mutex);
        return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
        int ret;

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                return 0;

        mutex_lock(&adev->psp.mutex);
        ret = psp_mode1_reset(&adev->psp);
        mutex_unlock(&adev->psp.mutex);

        return ret;
}

int psp_rlc_autoload_start(struct psp_context *psp)
{
        int ret;
        struct psp_gfx_cmd_resp *cmd;

        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
        kfree(cmd);
        return ret;
}

int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
                        uint64_t cmd_gpu_addr, int cmd_size)
{
        struct amdgpu_firmware_info ucode = {0};

        ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
                                    AMDGPU_UCODE_ID_VCN0_RAM;
        ucode.mc_addr = cmd_gpu_addr;
        ucode.ucode_size = cmd_size;

        return psp_execute_np_fw_load(&adev->psp, &ucode);
}

int psp_ring_cmd_submit(struct psp_context *psp,
                        uint64_t cmd_buf_mc_addr,
                        uint64_t fence_mc_addr,
                        int index)
{
        unsigned int psp_write_ptr_reg = 0;
        struct psp_gfx_rb_frame *write_frame;
        struct psp_ring *ring = &psp->km_ring;
        struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
        struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
                ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
        struct amdgpu_device *adev = psp->adev;
        uint32_t ring_size_dw = ring->ring_size / 4;
        uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

        /* KM (GPCOM) prepare write pointer */
        psp_write_ptr_reg = psp_ring_get_wptr(psp);

        /* Update KM RB frame pointer to new frame */
        /* write_frame ptr increments by size of rb_frame in bytes */
        /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
        if ((psp_write_ptr_reg % ring_size_dw) == 0)
                write_frame = ring_buffer_start;
        else
                write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
        /* Check invalid write_frame ptr address */
        if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
                DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
                          ring_buffer_start, ring_buffer_end, write_frame);
                DRM_ERROR("write_frame is pointing to address out of bounds\n");
                return -EINVAL;
        }

        /* Initialize KM RB frame */
        memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));

        /* Update KM RB frame */
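        /*
         * Each frame carries the MC addresses of the command buffer and the
         * fence buffer plus a fence value; once the PSP has consumed the
         * frame it writes that value back, which the submit path in
         * psp_cmd_submit_buf() waits on before reading the response.
         */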
        write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
        write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
        write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
        write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
        write_frame->fence_value = index;
        amdgpu_asic_flush_hdp(adev, NULL);

        /* Update the write pointer in DWORDs */
        psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
        psp_ring_set_wptr(psp, psp_write_ptr_reg);
        return 0;
}

int psp_init_asd_microcode(struct psp_context *psp,
                           const char *chip_name)
{
        struct amdgpu_device *adev = psp->adev;
        char fw_name[30];
        const struct psp_firmware_header_v1_0 *asd_hdr;
        int err = 0;

        if (!chip_name) {
                dev_err(adev->dev, "invalid chip name for asd microcode\n");
                return -EINVAL;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
        err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
        if (err)
                goto out;

        err = amdgpu_ucode_validate(adev->psp.asd_fw);
        if (err)
                goto out;

        asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
        adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
        adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
        adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
        adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
                le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
        return 0;
out:
        dev_err(adev->dev, "failed to initialize asd microcode\n");
        release_firmware(adev->psp.asd_fw);
        adev->psp.asd_fw = NULL;
        return err;
}

int psp_init_sos_microcode(struct psp_context *psp,
                           const char *chip_name)
{
        struct amdgpu_device *adev = psp->adev;
        char fw_name[30];
        const struct psp_firmware_header_v1_0 *sos_hdr;
        const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
        const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
        const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
        int err = 0;

        if (!chip_name) {
                dev_err(adev->dev, "invalid chip name for sos microcode\n");
                return -EINVAL;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
        err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
        if (err)
                goto out;

        err = amdgpu_ucode_validate(adev->psp.sos_fw);
        if (err)
                goto out;

        sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
        amdgpu_ucode_print_psp_hdr(&sos_hdr->header);

        switch (sos_hdr->header.header_version_major) {
        case 1:
                adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
                adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
                adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
                adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
                adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
                        le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
                adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                        le32_to_cpu(sos_hdr->sos_offset_bytes);
                if (sos_hdr->header.header_version_minor == 1) {
                        sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
                        adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
                        adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
                        adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
                        adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
                }
                if (sos_hdr->header.header_version_minor == 2) {
                        sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
                        adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
                        adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
                }
                if (sos_hdr->header.header_version_minor == 3) {
                        sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
                        adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
                        adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
                        adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
                        adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
                        adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
                        adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
                }
                break;
        default:
                dev_err(adev->dev,
                        "unsupported psp sos firmware\n");
                err = -EINVAL;
                goto out;
        }

        return 0;
out:
        dev_err(adev->dev,
                "failed to init sos firmware\n");
        release_firmware(adev->psp.sos_fw);
        adev->psp.sos_fw = NULL;

        return err;
}

int parse_ta_bin_descriptor(struct psp_context *psp,
                            const struct ta_fw_bin_desc *desc,
                            const struct ta_firmware_header_v2_0 *ta_hdr)
{
        uint8_t *ucode_start_addr = NULL;

        if (!psp || !desc || !ta_hdr)
                return -EINVAL;

        ucode_start_addr = (uint8_t *)ta_hdr +
                le32_to_cpu(desc->offset_bytes) +
                le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

        switch (desc->fw_type) {
        case TA_FW_TYPE_PSP_ASD:
                psp->asd_fw_version = le32_to_cpu(desc->fw_version);
                psp->asd_feature_version = le32_to_cpu(desc->fw_version);
                psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->asd_start_addr = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_XGMI:
                psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
                psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->ta_xgmi_start_addr = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAS:
                psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version);
                psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->ta_ras_start_addr = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_HDCP:
                psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
                psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->ta_hdcp_start_addr = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_DTM:
                psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version);
                psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->ta_dtm_start_addr = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAP:
                psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
                psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
                psp->ta_rap_start_addr = ucode_start_addr;
                break;
        default:
                dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
                break;
        }

        return 0;
}

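/*
 * The <chip>_ta.bin image packs the ASD and the XGMI/RAS/HDCP/DTM/RAP TAs
 * behind a single v2.0 header; psp_init_ta_microcode() below validates that
 * header and hands each packed descriptor to parse_ta_bin_descriptor().
 */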
%d\n", desc->fw_type); 2546 break; 2547 } 2548 2549 return 0; 2550 } 2551 2552 int psp_init_ta_microcode(struct psp_context *psp, 2553 const char *chip_name) 2554 { 2555 struct amdgpu_device *adev = psp->adev; 2556 char fw_name[30]; 2557 const struct ta_firmware_header_v2_0 *ta_hdr; 2558 int err = 0; 2559 int ta_index = 0; 2560 2561 if (!chip_name) { 2562 dev_err(adev->dev, "invalid chip name for ta microcode\n"); 2563 return -EINVAL; 2564 } 2565 2566 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 2567 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 2568 if (err) 2569 goto out; 2570 2571 err = amdgpu_ucode_validate(adev->psp.ta_fw); 2572 if (err) 2573 goto out; 2574 2575 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 2576 2577 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) { 2578 dev_err(adev->dev, "unsupported TA header version\n"); 2579 err = -EINVAL; 2580 goto out; 2581 } 2582 2583 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) { 2584 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 2585 err = -EINVAL; 2586 goto out; 2587 } 2588 2589 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 2590 err = parse_ta_bin_descriptor(psp, 2591 &ta_hdr->ta_fw_bin[ta_index], 2592 ta_hdr); 2593 if (err) 2594 goto out; 2595 } 2596 2597 return 0; 2598 out: 2599 dev_err(adev->dev, "fail to initialize ta microcode\n"); 2600 release_firmware(adev->psp.ta_fw); 2601 adev->psp.ta_fw = NULL; 2602 return err; 2603 } 2604 2605 static int psp_set_clockgating_state(void *handle, 2606 enum amd_clockgating_state state) 2607 { 2608 return 0; 2609 } 2610 2611 static int psp_set_powergating_state(void *handle, 2612 enum amd_powergating_state state) 2613 { 2614 return 0; 2615 } 2616 2617 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 2618 struct device_attribute *attr, 2619 char *buf) 2620 { 2621 struct drm_device *ddev = dev_get_drvdata(dev); 2622 struct amdgpu_device *adev = drm_to_adev(ddev); 2623 uint32_t fw_ver; 2624 int ret; 2625 2626 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2627 DRM_INFO("PSP block is not ready yet."); 2628 return -EBUSY; 2629 } 2630 2631 mutex_lock(&adev->psp.mutex); 2632 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 2633 mutex_unlock(&adev->psp.mutex); 2634 2635 if (ret) { 2636 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 2637 return ret; 2638 } 2639 2640 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 2641 } 2642 2643 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 2644 struct device_attribute *attr, 2645 const char *buf, 2646 size_t count) 2647 { 2648 struct drm_device *ddev = dev_get_drvdata(dev); 2649 struct amdgpu_device *adev = drm_to_adev(ddev); 2650 void *cpu_addr; 2651 dma_addr_t dma_addr; 2652 int ret; 2653 char fw_name[100]; 2654 const struct firmware *usbc_pd_fw; 2655 2656 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 2657 DRM_INFO("PSP block is not ready yet."); 2658 return -EBUSY; 2659 } 2660 2661 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 2662 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 2663 if (ret) 2664 goto fail; 2665 2666 /* We need contiguous physical mem to place the FW for psp to access */ 2667 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 2668 2669 ret = dma_mapping_error(adev->dev, dma_addr); 2670 if (ret) 2671 goto rel_buf; 2672 2673 memcpy_toio(cpu_addr, usbc_pd_fw->data, 

        /*
         * x86 specific workaround.
         * Without it the buffer is invisible in PSP.
         *
         * TODO Remove once PSP starts snooping CPU cache
         */
#ifdef CONFIG_X86
        clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
#endif

        mutex_lock(&adev->psp.mutex);
        ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
        mutex_unlock(&adev->psp.mutex);

rel_buf:
        dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
        release_firmware(usbc_pd_fw);

fail:
        if (ret) {
                DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
                return ret;
        }

        return count;
}

static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
                   psp_usbc_pd_fw_sysfs_read,
                   psp_usbc_pd_fw_sysfs_write);

const struct amd_ip_funcs psp_ip_funcs = {
        .name = "psp",
        .early_init = psp_early_init,
        .late_init = NULL,
        .sw_init = psp_sw_init,
        .sw_fini = psp_sw_fini,
        .hw_init = psp_hw_init,
        .hw_fini = psp_hw_fini,
        .suspend = psp_suspend,
        .resume = psp_resume,
        .is_idle = NULL,
        .check_soft_reset = NULL,
        .wait_for_idle = NULL,
        .soft_reset = NULL,
        .set_clockgating_state = psp_set_clockgating_state,
        .set_powergating_state = psp_set_powergating_state,
};

static int psp_sysfs_init(struct amdgpu_device *adev)
{
        int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);

        if (ret)
                DRM_ERROR("Failed to create USBC PD FW control file!");

        return ret;
}

static void psp_sysfs_fini(struct amdgpu_device *adev)
{
        device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}

const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 3,
        .minor = 1,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 10,
        .minor = 0,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_PSP,
        .major = 12,
        .minor = 0,
        .rev = 0,
        .funcs = &psp_ip_funcs,
};