1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <drm/drm_drv.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_xgmi.h" 33 #include "soc15_common.h" 34 #include "psp_v3_1.h" 35 #include "psp_v10_0.h" 36 #include "psp_v11_0.h" 37 #include "psp_v11_0_8.h" 38 #include "psp_v12_0.h" 39 #include "psp_v13_0.h" 40 #include "psp_v13_0_4.h" 41 42 #include "amdgpu_ras.h" 43 #include "amdgpu_securedisplay.h" 44 #include "amdgpu_atomfirmware.h" 45 46 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) 47 48 static int psp_sysfs_init(struct amdgpu_device *adev); 49 static void psp_sysfs_fini(struct amdgpu_device *adev); 50 51 static int psp_load_smu_fw(struct psp_context *psp); 52 static int psp_rap_terminate(struct psp_context *psp); 53 static int psp_securedisplay_terminate(struct psp_context *psp); 54 55 static int psp_ring_init(struct psp_context *psp, 56 enum psp_ring_type ring_type) 57 { 58 int ret = 0; 59 struct psp_ring *ring; 60 struct amdgpu_device *adev = psp->adev; 61 62 ring = &psp->km_ring; 63 64 ring->ring_type = ring_type; 65 66 /* allocate 4k Page of Local Frame Buffer memory for ring */ 67 ring->ring_size = 0x1000; 68 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 69 AMDGPU_GEM_DOMAIN_VRAM | 70 AMDGPU_GEM_DOMAIN_GTT, 71 &adev->firmware.rbuf, 72 &ring->ring_mem_mc_addr, 73 (void **)&ring->ring_mem); 74 if (ret) { 75 ring->ring_size = 0; 76 return ret; 77 } 78 79 return 0; 80 } 81 82 /* 83 * Due to DF Cstate management centralized to PMFW, the firmware 84 * loading sequence will be updated as below: 85 * - Load KDB 86 * - Load SYS_DRV 87 * - Load tOS 88 * - Load PMFW 89 * - Setup TMR 90 * - Load other non-psp fw 91 * - Load ASD 92 * - Load XGMI/RAS/HDCP/DTM TA if any 93 * 94 * This new sequence is required for 95 * - Arcturus and onwards 96 */ 97 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 98 { 99 struct amdgpu_device *adev = psp->adev; 100 101 if (amdgpu_sriov_vf(adev)) { 102 psp->pmfw_centralized_cstate_management = false; 103 return; 104 } 105 106 switch (adev->ip_versions[MP0_HWIP][0]) { 107 case IP_VERSION(11, 0, 0): 108 case IP_VERSION(11, 0, 4): 109 case IP_VERSION(11, 0, 5): 110 case IP_VERSION(11, 0, 7): 111 case IP_VERSION(11, 0, 9): 112 case IP_VERSION(11, 0, 11): 113 case IP_VERSION(11, 0, 12): 114 case IP_VERSION(11, 0, 13): 115 case IP_VERSION(13, 0, 0): 116 case IP_VERSION(13, 0, 2): 117 case IP_VERSION(13, 0, 7): 118 psp->pmfw_centralized_cstate_management = true; 119 break; 120 default: 121 psp->pmfw_centralized_cstate_management = false; 122 break; 123 } 124 } 125 126 static int psp_init_sriov_microcode(struct psp_context *psp) 127 { 128 struct amdgpu_device *adev = psp->adev; 129 char ucode_prefix[30]; 130 int ret = 0; 131 132 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 133 134 switch (adev->ip_versions[MP0_HWIP][0]) { 135 case IP_VERSION(9, 0, 0): 136 case IP_VERSION(11, 0, 7): 137 case IP_VERSION(11, 0, 9): 138 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 139 ret = psp_init_cap_microcode(psp, ucode_prefix); 140 break; 141 case IP_VERSION(13, 0, 2): 142 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 143 ret = psp_init_cap_microcode(psp, ucode_prefix); 144 ret &= psp_init_ta_microcode(psp, ucode_prefix); 145 break; 146 case IP_VERSION(13, 0, 0): 147 adev->virt.autoload_ucode_id = 0; 148 break; 149 case IP_VERSION(13, 0, 10): 150 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA; 151 break; 152 default: 153 return -EINVAL; 154 } 155 return ret; 156 } 157 158 static int psp_early_init(void *handle) 159 { 160 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 161 struct psp_context *psp = &adev->psp; 162 163 switch (adev->ip_versions[MP0_HWIP][0]) { 164 case IP_VERSION(9, 0, 0): 165 psp_v3_1_set_psp_funcs(psp); 166 psp->autoload_supported = false; 167 break; 168 case IP_VERSION(10, 0, 0): 169 case IP_VERSION(10, 0, 1): 170 psp_v10_0_set_psp_funcs(psp); 171 psp->autoload_supported = false; 172 break; 173 case IP_VERSION(11, 0, 2): 174 case IP_VERSION(11, 0, 4): 175 psp_v11_0_set_psp_funcs(psp); 176 psp->autoload_supported = false; 177 break; 178 case IP_VERSION(11, 0, 0): 179 case IP_VERSION(11, 0, 5): 180 case IP_VERSION(11, 0, 9): 181 case IP_VERSION(11, 0, 7): 182 case IP_VERSION(11, 0, 11): 183 case IP_VERSION(11, 5, 0): 184 case IP_VERSION(11, 0, 12): 185 case IP_VERSION(11, 0, 13): 186 psp_v11_0_set_psp_funcs(psp); 187 psp->autoload_supported = true; 188 break; 189 case IP_VERSION(11, 0, 3): 190 case IP_VERSION(12, 0, 1): 191 psp_v12_0_set_psp_funcs(psp); 192 break; 193 case IP_VERSION(13, 0, 2): 194 case IP_VERSION(13, 0, 6): 195 psp_v13_0_set_psp_funcs(psp); 196 break; 197 case IP_VERSION(13, 0, 1): 198 case IP_VERSION(13, 0, 3): 199 case IP_VERSION(13, 0, 5): 200 case IP_VERSION(13, 0, 8): 201 case IP_VERSION(13, 0, 10): 202 case IP_VERSION(13, 0, 11): 203 psp_v13_0_set_psp_funcs(psp); 204 psp->autoload_supported = true; 205 break; 206 case IP_VERSION(11, 0, 8): 207 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { 208 psp_v11_0_8_set_psp_funcs(psp); 209 psp->autoload_supported = false; 210 } 211 break; 212 case IP_VERSION(13, 0, 0): 213 case IP_VERSION(13, 0, 7): 214 psp_v13_0_set_psp_funcs(psp); 215 psp->autoload_supported = true; 216 break; 217 case IP_VERSION(13, 0, 4): 218 psp_v13_0_4_set_psp_funcs(psp); 219 psp->autoload_supported = true; 220 break; 221 default: 222 return -EINVAL; 223 } 224 225 psp->adev = adev; 226 227 psp_check_pmfw_centralized_cstate_management(psp); 228 229 if (amdgpu_sriov_vf(adev)) 230 return psp_init_sriov_microcode(psp); 231 else 232 return psp_init_microcode(psp); 233 } 234 235 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) 236 { 237 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, 238 &mem_ctx->shared_buf); 239 mem_ctx->shared_bo = NULL; 240 } 241 242 static void psp_free_shared_bufs(struct psp_context *psp) 243 { 244 void *tmr_buf; 245 void **pptr; 246 247 /* free TMR memory buffer */ 248 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 249 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 250 psp->tmr_bo = NULL; 251 252 /* free xgmi shared memory */ 253 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context); 254 255 /* free ras shared memory */ 256 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context); 257 258 /* free hdcp shared memory */ 259 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context); 260 261 /* free dtm shared memory */ 262 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context); 263 264 /* free rap shared memory */ 265 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 266 267 /* free securedisplay shared memory */ 268 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 269 270 271 } 272 273 static void psp_memory_training_fini(struct psp_context *psp) 274 { 275 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 276 277 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; 278 kfree(ctx->sys_cache); 279 ctx->sys_cache = NULL; 280 } 281 282 static int psp_memory_training_init(struct psp_context *psp) 283 { 284 int ret; 285 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 286 287 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { 288 DRM_DEBUG("memory training is not supported!\n"); 289 return 0; 290 } 291 292 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); 293 if (ctx->sys_cache == NULL) { 294 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); 295 ret = -ENOMEM; 296 goto Err_out; 297 } 298 299 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", 300 ctx->train_data_size, 301 ctx->p2c_train_data_offset, 302 ctx->c2p_train_data_offset); 303 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; 304 return 0; 305 306 Err_out: 307 psp_memory_training_fini(psp); 308 return ret; 309 } 310 311 /* 312 * Helper funciton to query psp runtime database entry 313 * 314 * @adev: amdgpu_device pointer 315 * @entry_type: the type of psp runtime database entry 316 * @db_entry: runtime database entry pointer 317 * 318 * Return false if runtime database doesn't exit or entry is invalid 319 * or true if the specific database entry is found, and copy to @db_entry 320 */ 321 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 322 enum psp_runtime_entry_type entry_type, 323 void *db_entry) 324 { 325 uint64_t db_header_pos, db_dir_pos; 326 struct psp_runtime_data_header db_header = {0}; 327 struct psp_runtime_data_directory db_dir = {0}; 328 bool ret = false; 329 int i; 330 331 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 332 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 333 334 /* read runtime db header from vram */ 335 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 336 sizeof(struct psp_runtime_data_header), false); 337 338 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 339 /* runtime db doesn't exist, exit */ 340 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n"); 341 return false; 342 } 343 344 /* read runtime database entry from vram */ 345 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 346 sizeof(struct psp_runtime_data_directory), false); 347 348 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 349 /* invalid db entry count, exit */ 350 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 351 return false; 352 } 353 354 /* look up for requested entry type */ 355 for (i = 0; i < db_dir.entry_count && !ret; i++) { 356 if (db_dir.entry_list[i].entry_type == entry_type) { 357 switch (entry_type) { 358 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 359 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { 360 /* invalid db entry size */ 361 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n"); 362 return false; 363 } 364 /* read runtime database entry */ 365 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 366 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 367 ret = true; 368 break; 369 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS: 370 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) { 371 /* invalid db entry size */ 372 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n"); 373 return false; 374 } 375 /* read runtime database entry */ 376 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 377 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false); 378 ret = true; 379 break; 380 default: 381 ret = false; 382 break; 383 } 384 } 385 } 386 387 return ret; 388 } 389 390 static int psp_sw_init(void *handle) 391 { 392 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 393 struct psp_context *psp = &adev->psp; 394 int ret; 395 struct psp_runtime_boot_cfg_entry boot_cfg_entry; 396 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 397 struct psp_runtime_scpm_entry scpm_entry; 398 399 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 400 if (!psp->cmd) { 401 DRM_ERROR("Failed to allocate memory to command buffer!\n"); 402 ret = -ENOMEM; 403 } 404 405 adev->psp.xgmi_context.supports_extended_data = 406 !adev->gmc.xgmi.connected_to_cpu && 407 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2); 408 409 memset(&scpm_entry, 0, sizeof(scpm_entry)); 410 if ((psp_get_runtime_db_entry(adev, 411 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS, 412 &scpm_entry)) && 413 (SCPM_DISABLE != scpm_entry.scpm_status)) { 414 adev->scpm_enabled = true; 415 adev->scpm_status = scpm_entry.scpm_status; 416 } else { 417 adev->scpm_enabled = false; 418 adev->scpm_status = SCPM_DISABLE; 419 } 420 421 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */ 422 423 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 424 if (psp_get_runtime_db_entry(adev, 425 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 426 &boot_cfg_entry)) { 427 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 428 if ((psp->boot_cfg_bitmask) & 429 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 430 /* If psp runtime database exists, then 431 * only enable two stage memory training 432 * when TWO_STAGE_DRAM_TRAINING bit is set 433 * in runtime database */ 434 mem_training_ctx->enable_mem_training = true; 435 } 436 437 } else { 438 /* If psp runtime database doesn't exist or 439 * is invalid, force enable two stage memory 440 * training */ 441 mem_training_ctx->enable_mem_training = true; 442 } 443 444 if (mem_training_ctx->enable_mem_training) { 445 ret = psp_memory_training_init(psp); 446 if (ret) { 447 DRM_ERROR("Failed to initialize memory training!\n"); 448 return ret; 449 } 450 451 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 452 if (ret) { 453 DRM_ERROR("Failed to process memory training!\n"); 454 return ret; 455 } 456 } 457 458 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || 459 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) { 460 ret= psp_sysfs_init(adev); 461 if (ret) { 462 return ret; 463 } 464 } 465 466 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 467 amdgpu_sriov_vf(adev) ? 468 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 469 &psp->fw_pri_bo, 470 &psp->fw_pri_mc_addr, 471 &psp->fw_pri_buf); 472 if (ret) 473 return ret; 474 475 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 476 AMDGPU_GEM_DOMAIN_VRAM, 477 &psp->fence_buf_bo, 478 &psp->fence_buf_mc_addr, 479 &psp->fence_buf); 480 if (ret) 481 goto failed1; 482 483 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 484 AMDGPU_GEM_DOMAIN_VRAM, 485 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 486 (void **)&psp->cmd_buf_mem); 487 if (ret) 488 goto failed2; 489 490 return 0; 491 492 failed2: 493 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 494 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 495 failed1: 496 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 497 &psp->fence_buf_mc_addr, &psp->fence_buf); 498 return ret; 499 } 500 501 static int psp_sw_fini(void *handle) 502 { 503 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 504 struct psp_context *psp = &adev->psp; 505 struct psp_gfx_cmd_resp *cmd = psp->cmd; 506 507 psp_memory_training_fini(psp); 508 509 amdgpu_ucode_release(&psp->sos_fw); 510 amdgpu_ucode_release(&psp->asd_fw); 511 amdgpu_ucode_release(&psp->ta_fw); 512 amdgpu_ucode_release(&psp->cap_fw); 513 amdgpu_ucode_release(&psp->toc_fw); 514 515 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || 516 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) 517 psp_sysfs_fini(adev); 518 519 kfree(cmd); 520 cmd = NULL; 521 522 if (psp->km_ring.ring_mem) 523 amdgpu_bo_free_kernel(&adev->firmware.rbuf, 524 &psp->km_ring.ring_mem_mc_addr, 525 (void **)&psp->km_ring.ring_mem); 526 527 amdgpu_bo_free_kernel(&psp->fw_pri_bo, 528 &psp->fw_pri_mc_addr, &psp->fw_pri_buf); 529 amdgpu_bo_free_kernel(&psp->fence_buf_bo, 530 &psp->fence_buf_mc_addr, &psp->fence_buf); 531 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 532 (void **)&psp->cmd_buf_mem); 533 534 return 0; 535 } 536 537 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 538 uint32_t reg_val, uint32_t mask, bool check_changed) 539 { 540 uint32_t val; 541 int i; 542 struct amdgpu_device *adev = psp->adev; 543 544 if (psp->adev->no_hw_access) 545 return 0; 546 547 for (i = 0; i < adev->usec_timeout; i++) { 548 val = RREG32(reg_index); 549 if (check_changed) { 550 if (val != reg_val) 551 return 0; 552 } else { 553 if ((val & mask) == reg_val) 554 return 0; 555 } 556 udelay(1); 557 } 558 559 return -ETIME; 560 } 561 562 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) 563 { 564 switch (cmd_id) { 565 case GFX_CMD_ID_LOAD_TA: 566 return "LOAD_TA"; 567 case GFX_CMD_ID_UNLOAD_TA: 568 return "UNLOAD_TA"; 569 case GFX_CMD_ID_INVOKE_CMD: 570 return "INVOKE_CMD"; 571 case GFX_CMD_ID_LOAD_ASD: 572 return "LOAD_ASD"; 573 case GFX_CMD_ID_SETUP_TMR: 574 return "SETUP_TMR"; 575 case GFX_CMD_ID_LOAD_IP_FW: 576 return "LOAD_IP_FW"; 577 case GFX_CMD_ID_DESTROY_TMR: 578 return "DESTROY_TMR"; 579 case GFX_CMD_ID_SAVE_RESTORE: 580 return "SAVE_RESTORE_IP_FW"; 581 case GFX_CMD_ID_SETUP_VMR: 582 return "SETUP_VMR"; 583 case GFX_CMD_ID_DESTROY_VMR: 584 return "DESTROY_VMR"; 585 case GFX_CMD_ID_PROG_REG: 586 return "PROG_REG"; 587 case GFX_CMD_ID_GET_FW_ATTESTATION: 588 return "GET_FW_ATTESTATION"; 589 case GFX_CMD_ID_LOAD_TOC: 590 return "ID_LOAD_TOC"; 591 case GFX_CMD_ID_AUTOLOAD_RLC: 592 return "AUTOLOAD_RLC"; 593 case GFX_CMD_ID_BOOT_CFG: 594 return "BOOT_CFG"; 595 default: 596 return "UNKNOWN CMD"; 597 } 598 } 599 600 static int 601 psp_cmd_submit_buf(struct psp_context *psp, 602 struct amdgpu_firmware_info *ucode, 603 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 604 { 605 int ret; 606 int index, idx; 607 int timeout = 20000; 608 bool ras_intr = false; 609 bool skip_unsupport = false; 610 bool dev_entered; 611 612 if (psp->adev->no_hw_access) 613 return 0; 614 615 dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx); 616 /* 617 * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring 618 * a lock in drm_dev_enter during driver unload because we must call 619 * drm_dev_unplug as the beginning of unload driver sequence . It is very 620 * crucial that userspace can't access device instances anymore. 621 */ 622 if (!dev_entered) 623 WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD && 624 psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA && 625 psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD); 626 627 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 628 629 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 630 631 index = atomic_inc_return(&psp->fence_value); 632 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 633 if (ret) { 634 atomic_dec(&psp->fence_value); 635 goto exit; 636 } 637 638 amdgpu_device_invalidate_hdp(psp->adev, NULL); 639 while (*((unsigned int *)psp->fence_buf) != index) { 640 if (--timeout == 0) 641 break; 642 /* 643 * Shouldn't wait for timeout when err_event_athub occurs, 644 * because gpu reset thread triggered and lock resource should 645 * be released for psp resume sequence. 646 */ 647 ras_intr = amdgpu_ras_intr_triggered(); 648 if (ras_intr) 649 break; 650 usleep_range(10, 100); 651 amdgpu_device_invalidate_hdp(psp->adev, NULL); 652 } 653 654 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ 655 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED || 656 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev); 657 658 memcpy((void*)&cmd->resp, (void*)&psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp)); 659 660 /* In some cases, psp response status is not 0 even there is no 661 * problem while the command is submitted. Some version of PSP FW 662 * doesn't write 0 to that field. 663 * So here we would like to only print a warning instead of an error 664 * during psp initialization to avoid breaking hw_init and it doesn't 665 * return -EINVAL. 666 */ 667 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 668 if (ucode) 669 DRM_WARN("failed to load ucode %s(0x%X) ", 670 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 671 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 672 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, 673 psp->cmd_buf_mem->resp.status); 674 /* If any firmware (including CAP) load fails under SRIOV, it should 675 * return failure to stop the VF from initializing. 676 * Also return failure in case of timeout 677 */ 678 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) { 679 ret = -EINVAL; 680 goto exit; 681 } 682 } 683 684 if (ucode) { 685 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 686 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 687 } 688 689 exit: 690 if (dev_entered) 691 drm_dev_exit(idx); 692 return ret; 693 } 694 695 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp) 696 { 697 struct psp_gfx_cmd_resp *cmd = psp->cmd; 698 699 mutex_lock(&psp->mutex); 700 701 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 702 703 return cmd; 704 } 705 706 static void release_psp_cmd_buf(struct psp_context *psp) 707 { 708 mutex_unlock(&psp->mutex); 709 } 710 711 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 712 struct psp_gfx_cmd_resp *cmd, 713 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 714 { 715 struct amdgpu_device *adev = psp->adev; 716 uint32_t size = amdgpu_bo_size(tmr_bo); 717 uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 718 719 if (amdgpu_sriov_vf(psp->adev)) 720 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 721 else 722 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 723 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 724 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 725 cmd->cmd.cmd_setup_tmr.buf_size = size; 726 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 727 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 728 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 729 } 730 731 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 732 uint64_t pri_buf_mc, uint32_t size) 733 { 734 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 735 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 736 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 737 cmd->cmd.cmd_load_toc.toc_size = size; 738 } 739 740 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 741 static int psp_load_toc(struct psp_context *psp, 742 uint32_t *tmr_size) 743 { 744 int ret; 745 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 746 747 /* Copy toc to psp firmware private buffer */ 748 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes); 749 750 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes); 751 752 ret = psp_cmd_submit_buf(psp, NULL, cmd, 753 psp->fence_buf_mc_addr); 754 if (!ret) 755 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 756 757 release_psp_cmd_buf(psp); 758 759 return ret; 760 } 761 762 /* Set up Trusted Memory Region */ 763 static int psp_tmr_init(struct psp_context *psp) 764 { 765 int ret = 0; 766 int tmr_size; 767 void *tmr_buf; 768 void **pptr; 769 770 /* 771 * According to HW engineer, they prefer the TMR address be "naturally 772 * aligned" , e.g. the start address be an integer divide of TMR size. 773 * 774 * Note: this memory need be reserved till the driver 775 * uninitializes. 776 */ 777 tmr_size = PSP_TMR_SIZE(psp->adev); 778 779 /* For ASICs support RLC autoload, psp will parse the toc 780 * and calculate the total size of TMR needed */ 781 if (!amdgpu_sriov_vf(psp->adev) && 782 psp->toc.start_addr && 783 psp->toc.size_bytes && 784 psp->fw_pri_buf) { 785 ret = psp_load_toc(psp, &tmr_size); 786 if (ret) { 787 DRM_ERROR("Failed to load toc\n"); 788 return ret; 789 } 790 } 791 792 if (!psp->tmr_bo) { 793 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 794 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 795 PSP_TMR_ALIGNMENT, 796 AMDGPU_HAS_VRAM(psp->adev) ? 797 AMDGPU_GEM_DOMAIN_VRAM : 798 AMDGPU_GEM_DOMAIN_GTT, 799 &psp->tmr_bo, &psp->tmr_mc_addr, 800 pptr); 801 } 802 803 return ret; 804 } 805 806 static bool psp_skip_tmr(struct psp_context *psp) 807 { 808 switch (psp->adev->ip_versions[MP0_HWIP][0]) { 809 case IP_VERSION(11, 0, 9): 810 case IP_VERSION(11, 0, 7): 811 case IP_VERSION(13, 0, 2): 812 case IP_VERSION(13, 0, 10): 813 return true; 814 default: 815 return false; 816 } 817 } 818 819 static int psp_tmr_load(struct psp_context *psp) 820 { 821 int ret; 822 struct psp_gfx_cmd_resp *cmd; 823 824 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. 825 * Already set up by host driver. 826 */ 827 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) 828 return 0; 829 830 cmd = acquire_psp_cmd_buf(psp); 831 832 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 833 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 834 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 835 836 ret = psp_cmd_submit_buf(psp, NULL, cmd, 837 psp->fence_buf_mc_addr); 838 839 release_psp_cmd_buf(psp); 840 841 return ret; 842 } 843 844 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 845 struct psp_gfx_cmd_resp *cmd) 846 { 847 if (amdgpu_sriov_vf(psp->adev)) 848 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 849 else 850 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 851 } 852 853 static int psp_tmr_unload(struct psp_context *psp) 854 { 855 int ret; 856 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 857 858 psp_prep_tmr_unload_cmd_buf(psp, cmd); 859 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n"); 860 861 ret = psp_cmd_submit_buf(psp, NULL, cmd, 862 psp->fence_buf_mc_addr); 863 864 release_psp_cmd_buf(psp); 865 866 return ret; 867 } 868 869 static int psp_tmr_terminate(struct psp_context *psp) 870 { 871 return psp_tmr_unload(psp); 872 } 873 874 int psp_get_fw_attestation_records_addr(struct psp_context *psp, 875 uint64_t *output_ptr) 876 { 877 int ret; 878 struct psp_gfx_cmd_resp *cmd; 879 880 if (!output_ptr) 881 return -EINVAL; 882 883 if (amdgpu_sriov_vf(psp->adev)) 884 return 0; 885 886 cmd = acquire_psp_cmd_buf(psp); 887 888 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION; 889 890 ret = psp_cmd_submit_buf(psp, NULL, cmd, 891 psp->fence_buf_mc_addr); 892 893 if (!ret) { 894 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) + 895 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32); 896 } 897 898 release_psp_cmd_buf(psp); 899 900 return ret; 901 } 902 903 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 904 { 905 struct psp_context *psp = &adev->psp; 906 struct psp_gfx_cmd_resp *cmd; 907 int ret; 908 909 if (amdgpu_sriov_vf(adev)) 910 return 0; 911 912 cmd = acquire_psp_cmd_buf(psp); 913 914 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 915 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 916 917 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 918 if (!ret) { 919 *boot_cfg = 920 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0; 921 } 922 923 release_psp_cmd_buf(psp); 924 925 return ret; 926 } 927 928 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 929 { 930 int ret; 931 struct psp_context *psp = &adev->psp; 932 struct psp_gfx_cmd_resp *cmd; 933 934 if (amdgpu_sriov_vf(adev)) 935 return 0; 936 937 cmd = acquire_psp_cmd_buf(psp); 938 939 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 940 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 941 cmd->cmd.boot_cfg.boot_config = boot_cfg; 942 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 943 944 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 945 946 release_psp_cmd_buf(psp); 947 948 return ret; 949 } 950 951 static int psp_rl_load(struct amdgpu_device *adev) 952 { 953 int ret; 954 struct psp_context *psp = &adev->psp; 955 struct psp_gfx_cmd_resp *cmd; 956 957 if (!is_psp_fw_valid(psp->rl)) 958 return 0; 959 960 cmd = acquire_psp_cmd_buf(psp); 961 962 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 963 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes); 964 965 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 966 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr); 967 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr); 968 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes; 969 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST; 970 971 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 972 973 release_psp_cmd_buf(psp); 974 975 return ret; 976 } 977 978 static int psp_asd_initialize(struct psp_context *psp) 979 { 980 int ret; 981 982 /* If PSP version doesn't match ASD version, asd loading will be failed. 983 * add workaround to bypass it for sriov now. 984 * TODO: add version check to make it common 985 */ 986 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) 987 return 0; 988 989 psp->asd_context.mem_context.shared_mc_addr = 0; 990 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; 991 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; 992 993 ret = psp_ta_load(psp, &psp->asd_context); 994 if (!ret) 995 psp->asd_context.initialized = true; 996 997 return ret; 998 } 999 1000 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1001 uint32_t session_id) 1002 { 1003 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 1004 cmd->cmd.cmd_unload_ta.session_id = session_id; 1005 } 1006 1007 int psp_ta_unload(struct psp_context *psp, struct ta_context *context) 1008 { 1009 int ret; 1010 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1011 1012 psp_prep_ta_unload_cmd_buf(cmd, context->session_id); 1013 1014 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1015 1016 context->resp_status = cmd->resp.status; 1017 1018 release_psp_cmd_buf(psp); 1019 1020 return ret; 1021 } 1022 1023 static int psp_asd_terminate(struct psp_context *psp) 1024 { 1025 int ret; 1026 1027 if (amdgpu_sriov_vf(psp->adev)) 1028 return 0; 1029 1030 if (!psp->asd_context.initialized) 1031 return 0; 1032 1033 ret = psp_ta_unload(psp, &psp->asd_context); 1034 if (!ret) 1035 psp->asd_context.initialized = false; 1036 1037 return ret; 1038 } 1039 1040 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1041 uint32_t id, uint32_t value) 1042 { 1043 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 1044 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 1045 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 1046 } 1047 1048 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 1049 uint32_t value) 1050 { 1051 struct psp_gfx_cmd_resp *cmd; 1052 int ret = 0; 1053 1054 if (reg >= PSP_REG_LAST) 1055 return -EINVAL; 1056 1057 cmd = acquire_psp_cmd_buf(psp); 1058 1059 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 1060 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1061 if (ret) 1062 DRM_ERROR("PSP failed to program reg id %d", reg); 1063 1064 release_psp_cmd_buf(psp); 1065 1066 return ret; 1067 } 1068 1069 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1070 uint64_t ta_bin_mc, 1071 struct ta_context *context) 1072 { 1073 cmd->cmd_id = context->ta_load_type; 1074 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 1075 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 1076 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; 1077 1078 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 1079 lower_32_bits(context->mem_context.shared_mc_addr); 1080 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 1081 upper_32_bits(context->mem_context.shared_mc_addr); 1082 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; 1083 } 1084 1085 int psp_ta_init_shared_buf(struct psp_context *psp, 1086 struct ta_mem_context *mem_ctx) 1087 { 1088 /* 1089 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1090 * physical) for ta to host memory 1091 */ 1092 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1093 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1094 AMDGPU_GEM_DOMAIN_GTT, 1095 &mem_ctx->shared_bo, 1096 &mem_ctx->shared_mc_addr, 1097 &mem_ctx->shared_buf); 1098 } 1099 1100 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1101 uint32_t ta_cmd_id, 1102 uint32_t session_id) 1103 { 1104 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1105 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 1106 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1107 } 1108 1109 int psp_ta_invoke(struct psp_context *psp, 1110 uint32_t ta_cmd_id, 1111 struct ta_context *context) 1112 { 1113 int ret; 1114 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 1115 1116 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); 1117 1118 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1119 psp->fence_buf_mc_addr); 1120 1121 context->resp_status = cmd->resp.status; 1122 1123 release_psp_cmd_buf(psp); 1124 1125 return ret; 1126 } 1127 1128 int psp_ta_load(struct psp_context *psp, struct ta_context *context) 1129 { 1130 int ret; 1131 struct psp_gfx_cmd_resp *cmd; 1132 1133 cmd = acquire_psp_cmd_buf(psp); 1134 1135 psp_copy_fw(psp, context->bin_desc.start_addr, 1136 context->bin_desc.size_bytes); 1137 1138 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); 1139 1140 ret = psp_cmd_submit_buf(psp, NULL, cmd, 1141 psp->fence_buf_mc_addr); 1142 1143 context->resp_status = cmd->resp.status; 1144 1145 if (!ret) { 1146 context->session_id = cmd->resp.session_id; 1147 } 1148 1149 release_psp_cmd_buf(psp); 1150 1151 return ret; 1152 } 1153 1154 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1155 { 1156 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); 1157 } 1158 1159 int psp_xgmi_terminate(struct psp_context *psp) 1160 { 1161 int ret; 1162 struct amdgpu_device *adev = psp->adev; 1163 1164 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1165 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || 1166 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && 1167 adev->gmc.xgmi.connected_to_cpu)) 1168 return 0; 1169 1170 if (!psp->xgmi_context.context.initialized) 1171 return 0; 1172 1173 ret = psp_ta_unload(psp, &psp->xgmi_context.context); 1174 1175 psp->xgmi_context.context.initialized = false; 1176 1177 return ret; 1178 } 1179 1180 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta) 1181 { 1182 struct ta_xgmi_shared_memory *xgmi_cmd; 1183 int ret; 1184 1185 if (!psp->ta_fw || 1186 !psp->xgmi_context.context.bin_desc.size_bytes || 1187 !psp->xgmi_context.context.bin_desc.start_addr) 1188 return -ENOENT; 1189 1190 if (!load_ta) 1191 goto invoke; 1192 1193 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; 1194 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1195 1196 if (!psp->xgmi_context.context.mem_context.shared_buf) { 1197 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); 1198 if (ret) 1199 return ret; 1200 } 1201 1202 /* Load XGMI TA */ 1203 ret = psp_ta_load(psp, &psp->xgmi_context.context); 1204 if (!ret) 1205 psp->xgmi_context.context.initialized = true; 1206 else 1207 return ret; 1208 1209 invoke: 1210 /* Initialize XGMI session */ 1211 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf); 1212 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1213 xgmi_cmd->flag_extend_link_record = set_extended_data; 1214 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 1215 1216 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1217 1218 return ret; 1219 } 1220 1221 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 1222 { 1223 struct ta_xgmi_shared_memory *xgmi_cmd; 1224 int ret; 1225 1226 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1227 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1228 1229 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; 1230 1231 /* Invoke xgmi ta to get hive id */ 1232 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1233 if (ret) 1234 return ret; 1235 1236 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 1237 1238 return 0; 1239 } 1240 1241 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 1242 { 1243 struct ta_xgmi_shared_memory *xgmi_cmd; 1244 int ret; 1245 1246 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1247 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1248 1249 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; 1250 1251 /* Invoke xgmi ta to get the node id */ 1252 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 1253 if (ret) 1254 return ret; 1255 1256 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 1257 1258 return 0; 1259 } 1260 1261 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1262 { 1263 return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && 1264 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b; 1265 } 1266 1267 /* 1268 * Chips that support extended topology information require the driver to 1269 * reflect topology information in the opposite direction. This is 1270 * because the TA has already exceeded its link record limit and if the 1271 * TA holds bi-directional information, the driver would have to do 1272 * multiple fetches instead of just two. 1273 */ 1274 static void psp_xgmi_reflect_topology_info(struct psp_context *psp, 1275 struct psp_xgmi_node_info node_info) 1276 { 1277 struct amdgpu_device *mirror_adev; 1278 struct amdgpu_hive_info *hive; 1279 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id; 1280 uint64_t dst_node_id = node_info.node_id; 1281 uint8_t dst_num_hops = node_info.num_hops; 1282 uint8_t dst_num_links = node_info.num_links; 1283 1284 hive = amdgpu_get_xgmi_hive(psp->adev); 1285 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) { 1286 struct psp_xgmi_topology_info *mirror_top_info; 1287 int j; 1288 1289 if (mirror_adev->gmc.xgmi.node_id != dst_node_id) 1290 continue; 1291 1292 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info; 1293 for (j = 0; j < mirror_top_info->num_nodes; j++) { 1294 if (mirror_top_info->nodes[j].node_id != src_node_id) 1295 continue; 1296 1297 mirror_top_info->nodes[j].num_hops = dst_num_hops; 1298 /* 1299 * prevent 0 num_links value re-reflection since reflection 1300 * criteria is based on num_hops (direct or indirect). 1301 * 1302 */ 1303 if (dst_num_links) 1304 mirror_top_info->nodes[j].num_links = dst_num_links; 1305 1306 break; 1307 } 1308 1309 break; 1310 } 1311 1312 amdgpu_put_xgmi_hive(hive); 1313 } 1314 1315 int psp_xgmi_get_topology_info(struct psp_context *psp, 1316 int number_devices, 1317 struct psp_xgmi_topology_info *topology, 1318 bool get_extended_data) 1319 { 1320 struct ta_xgmi_shared_memory *xgmi_cmd; 1321 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1322 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; 1323 int i; 1324 int ret; 1325 1326 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1327 return -EINVAL; 1328 1329 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1330 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1331 xgmi_cmd->flag_extend_link_record = get_extended_data; 1332 1333 /* Fill in the shared memory with topology information as input */ 1334 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1335 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; 1336 topology_info_input->num_nodes = number_devices; 1337 1338 for (i = 0; i < topology_info_input->num_nodes; i++) { 1339 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1340 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1341 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; 1342 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1343 } 1344 1345 /* Invoke xgmi ta to get the topology information */ 1346 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); 1347 if (ret) 1348 return ret; 1349 1350 /* Read the output topology information from the shared memory */ 1351 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; 1352 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; 1353 for (i = 0; i < topology->num_nodes; i++) { 1354 /* extended data will either be 0 or equal to non-extended data */ 1355 if (topology_info_output->nodes[i].num_hops) 1356 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; 1357 1358 /* non-extended data gets everything here so no need to update */ 1359 if (!get_extended_data) { 1360 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; 1361 topology->nodes[i].is_sharing_enabled = 1362 topology_info_output->nodes[i].is_sharing_enabled; 1363 topology->nodes[i].sdma_engine = 1364 topology_info_output->nodes[i].sdma_engine; 1365 } 1366 1367 } 1368 1369 /* Invoke xgmi ta again to get the link information */ 1370 if (psp_xgmi_peer_link_info_supported(psp)) { 1371 struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output; 1372 1373 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1374 1375 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS); 1376 1377 if (ret) 1378 return ret; 1379 1380 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info; 1381 for (i = 0; i < topology->num_nodes; i++) { 1382 /* accumulate num_links on extended data */ 1383 topology->nodes[i].num_links = get_extended_data ? 1384 topology->nodes[i].num_links + 1385 link_info_output->nodes[i].num_links : 1386 link_info_output->nodes[i].num_links; 1387 1388 /* reflect the topology information for bi-directionality */ 1389 if (psp->xgmi_context.supports_extended_data && 1390 get_extended_data && topology->nodes[i].num_hops) 1391 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]); 1392 } 1393 } 1394 1395 return 0; 1396 } 1397 1398 int psp_xgmi_set_topology_info(struct psp_context *psp, 1399 int number_devices, 1400 struct psp_xgmi_topology_info *topology) 1401 { 1402 struct ta_xgmi_shared_memory *xgmi_cmd; 1403 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; 1404 int i; 1405 1406 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) 1407 return -EINVAL; 1408 1409 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf; 1410 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 1411 1412 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; 1413 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; 1414 topology_info_input->num_nodes = number_devices; 1415 1416 for (i = 0; i < topology_info_input->num_nodes; i++) { 1417 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; 1418 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; 1419 topology_info_input->nodes[i].is_sharing_enabled = 1; 1420 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; 1421 } 1422 1423 /* Invoke xgmi ta to set topology information */ 1424 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 1425 } 1426 1427 // ras begin 1428 static void psp_ras_ta_check_status(struct psp_context *psp) 1429 { 1430 struct ta_ras_shared_memory *ras_cmd = 1431 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1432 1433 switch (ras_cmd->ras_status) { 1434 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: 1435 dev_warn(psp->adev->dev, 1436 "RAS WARNING: cmd failed due to unsupported ip\n"); 1437 break; 1438 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: 1439 dev_warn(psp->adev->dev, 1440 "RAS WARNING: cmd failed due to unsupported error injection\n"); 1441 break; 1442 case TA_RAS_STATUS__SUCCESS: 1443 break; 1444 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: 1445 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) 1446 dev_warn(psp->adev->dev, 1447 "RAS WARNING: Inject error to critical region is not allowed\n"); 1448 break; 1449 default: 1450 dev_warn(psp->adev->dev, 1451 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); 1452 break; 1453 } 1454 } 1455 1456 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1457 { 1458 struct ta_ras_shared_memory *ras_cmd; 1459 int ret; 1460 1461 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1462 1463 /* 1464 * TODO: bypass the loading in sriov for now 1465 */ 1466 if (amdgpu_sriov_vf(psp->adev)) 1467 return 0; 1468 1469 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); 1470 1471 if (amdgpu_ras_intr_triggered()) 1472 return ret; 1473 1474 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) 1475 { 1476 DRM_WARN("RAS: Unsupported Interface"); 1477 return -EINVAL; 1478 } 1479 1480 if (!ret) { 1481 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { 1482 dev_warn(psp->adev->dev, "ECC switch disabled\n"); 1483 1484 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE; 1485 } 1486 else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) 1487 dev_warn(psp->adev->dev, 1488 "RAS internal register access blocked\n"); 1489 1490 psp_ras_ta_check_status(psp); 1491 } 1492 1493 return ret; 1494 } 1495 1496 int psp_ras_enable_features(struct psp_context *psp, 1497 union ta_ras_cmd_input *info, bool enable) 1498 { 1499 struct ta_ras_shared_memory *ras_cmd; 1500 int ret; 1501 1502 if (!psp->ras_context.context.initialized) 1503 return -EINVAL; 1504 1505 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1506 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1507 1508 if (enable) 1509 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 1510 else 1511 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 1512 1513 ras_cmd->ras_in_message = *info; 1514 1515 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1516 if (ret) 1517 return -EINVAL; 1518 1519 return 0; 1520 } 1521 1522 int psp_ras_terminate(struct psp_context *psp) 1523 { 1524 int ret; 1525 1526 /* 1527 * TODO: bypass the terminate in sriov for now 1528 */ 1529 if (amdgpu_sriov_vf(psp->adev)) 1530 return 0; 1531 1532 if (!psp->ras_context.context.initialized) 1533 return 0; 1534 1535 ret = psp_ta_unload(psp, &psp->ras_context.context); 1536 1537 psp->ras_context.context.initialized = false; 1538 1539 return ret; 1540 } 1541 1542 int psp_ras_initialize(struct psp_context *psp) 1543 { 1544 int ret; 1545 uint32_t boot_cfg = 0xFF; 1546 struct amdgpu_device *adev = psp->adev; 1547 struct ta_ras_shared_memory *ras_cmd; 1548 1549 /* 1550 * TODO: bypass the initialize in sriov for now 1551 */ 1552 if (amdgpu_sriov_vf(adev)) 1553 return 0; 1554 1555 if (!adev->psp.ras_context.context.bin_desc.size_bytes || 1556 !adev->psp.ras_context.context.bin_desc.start_addr) { 1557 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1558 return 0; 1559 } 1560 1561 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1562 /* query GECC enablement status from boot config 1563 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1564 */ 1565 ret = psp_boot_config_get(adev, &boot_cfg); 1566 if (ret) 1567 dev_warn(adev->dev, "PSP get boot config failed\n"); 1568 1569 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1570 if (!boot_cfg) { 1571 dev_info(adev->dev, "GECC is disabled\n"); 1572 } else { 1573 /* disable GECC in next boot cycle if ras is 1574 * disabled by module parameter amdgpu_ras_enable 1575 * and/or amdgpu_ras_mask, or boot_config_get call 1576 * is failed 1577 */ 1578 ret = psp_boot_config_set(adev, 0); 1579 if (ret) 1580 dev_warn(adev->dev, "PSP set boot config failed\n"); 1581 else 1582 dev_warn(adev->dev, "GECC will be disabled in next boot cycle " 1583 "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1584 } 1585 } else { 1586 if (1 == boot_cfg) { 1587 dev_info(adev->dev, "GECC is enabled\n"); 1588 } else { 1589 /* enable GECC in next boot cycle if it is disabled 1590 * in boot config, or force enable GECC if failed to 1591 * get boot configuration 1592 */ 1593 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1594 if (ret) 1595 dev_warn(adev->dev, "PSP set boot config failed\n"); 1596 else 1597 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1598 } 1599 } 1600 } 1601 1602 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; 1603 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1604 1605 if (!psp->ras_context.context.mem_context.shared_buf) { 1606 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); 1607 if (ret) 1608 return ret; 1609 } 1610 1611 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1612 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1613 1614 if (amdgpu_ras_is_poison_mode_supported(adev)) 1615 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; 1616 if (!adev->gmc.xgmi.connected_to_cpu) 1617 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; 1618 1619 ret = psp_ta_load(psp, &psp->ras_context.context); 1620 1621 if (!ret && !ras_cmd->ras_status) 1622 psp->ras_context.context.initialized = true; 1623 else { 1624 if (ras_cmd->ras_status) 1625 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); 1626 1627 /* fail to load RAS TA */ 1628 psp->ras_context.context.initialized = false; 1629 } 1630 1631 return ret; 1632 } 1633 1634 int psp_ras_trigger_error(struct psp_context *psp, 1635 struct ta_ras_trigger_error_input *info) 1636 { 1637 struct ta_ras_shared_memory *ras_cmd; 1638 int ret; 1639 1640 if (!psp->ras_context.context.initialized) 1641 return -EINVAL; 1642 1643 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; 1644 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 1645 1646 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR; 1647 ras_cmd->ras_in_message.trigger_error = *info; 1648 1649 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 1650 if (ret) 1651 return -EINVAL; 1652 1653 /* If err_event_athub occurs error inject was successful, however 1654 return status from TA is no long reliable */ 1655 if (amdgpu_ras_intr_triggered()) 1656 return 0; 1657 1658 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) 1659 return -EACCES; 1660 else if (ras_cmd->ras_status) 1661 return -EINVAL; 1662 1663 return 0; 1664 } 1665 // ras end 1666 1667 // HDCP start 1668 static int psp_hdcp_initialize(struct psp_context *psp) 1669 { 1670 int ret; 1671 1672 /* 1673 * TODO: bypass the initialize in sriov for now 1674 */ 1675 if (amdgpu_sriov_vf(psp->adev)) 1676 return 0; 1677 1678 if (!psp->hdcp_context.context.bin_desc.size_bytes || 1679 !psp->hdcp_context.context.bin_desc.start_addr) { 1680 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 1681 return 0; 1682 } 1683 1684 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; 1685 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1686 1687 if (!psp->hdcp_context.context.mem_context.shared_buf) { 1688 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); 1689 if (ret) 1690 return ret; 1691 } 1692 1693 ret = psp_ta_load(psp, &psp->hdcp_context.context); 1694 if (!ret) { 1695 psp->hdcp_context.context.initialized = true; 1696 mutex_init(&psp->hdcp_context.mutex); 1697 } 1698 1699 return ret; 1700 } 1701 1702 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1703 { 1704 /* 1705 * TODO: bypass the loading in sriov for now 1706 */ 1707 if (amdgpu_sriov_vf(psp->adev)) 1708 return 0; 1709 1710 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); 1711 } 1712 1713 static int psp_hdcp_terminate(struct psp_context *psp) 1714 { 1715 int ret; 1716 1717 /* 1718 * TODO: bypass the terminate in sriov for now 1719 */ 1720 if (amdgpu_sriov_vf(psp->adev)) 1721 return 0; 1722 1723 if (!psp->hdcp_context.context.initialized) 1724 return 0; 1725 1726 ret = psp_ta_unload(psp, &psp->hdcp_context.context); 1727 1728 psp->hdcp_context.context.initialized = false; 1729 1730 return ret; 1731 } 1732 // HDCP end 1733 1734 // DTM start 1735 static int psp_dtm_initialize(struct psp_context *psp) 1736 { 1737 int ret; 1738 1739 /* 1740 * TODO: bypass the initialize in sriov for now 1741 */ 1742 if (amdgpu_sriov_vf(psp->adev)) 1743 return 0; 1744 1745 if (!psp->dtm_context.context.bin_desc.size_bytes || 1746 !psp->dtm_context.context.bin_desc.start_addr) { 1747 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1748 return 0; 1749 } 1750 1751 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; 1752 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1753 1754 if (!psp->dtm_context.context.mem_context.shared_buf) { 1755 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); 1756 if (ret) 1757 return ret; 1758 } 1759 1760 ret = psp_ta_load(psp, &psp->dtm_context.context); 1761 if (!ret) { 1762 psp->dtm_context.context.initialized = true; 1763 mutex_init(&psp->dtm_context.mutex); 1764 } 1765 1766 return ret; 1767 } 1768 1769 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1770 { 1771 /* 1772 * TODO: bypass the loading in sriov for now 1773 */ 1774 if (amdgpu_sriov_vf(psp->adev)) 1775 return 0; 1776 1777 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); 1778 } 1779 1780 static int psp_dtm_terminate(struct psp_context *psp) 1781 { 1782 int ret; 1783 1784 /* 1785 * TODO: bypass the terminate in sriov for now 1786 */ 1787 if (amdgpu_sriov_vf(psp->adev)) 1788 return 0; 1789 1790 if (!psp->dtm_context.context.initialized) 1791 return 0; 1792 1793 ret = psp_ta_unload(psp, &psp->dtm_context.context); 1794 1795 psp->dtm_context.context.initialized = false; 1796 1797 return ret; 1798 } 1799 // DTM end 1800 1801 // RAP start 1802 static int psp_rap_initialize(struct psp_context *psp) 1803 { 1804 int ret; 1805 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS; 1806 1807 /* 1808 * TODO: bypass the initialize in sriov for now 1809 */ 1810 if (amdgpu_sriov_vf(psp->adev)) 1811 return 0; 1812 1813 if (!psp->rap_context.context.bin_desc.size_bytes || 1814 !psp->rap_context.context.bin_desc.start_addr) { 1815 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); 1816 return 0; 1817 } 1818 1819 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; 1820 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1821 1822 if (!psp->rap_context.context.mem_context.shared_buf) { 1823 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); 1824 if (ret) 1825 return ret; 1826 } 1827 1828 ret = psp_ta_load(psp, &psp->rap_context.context); 1829 if (!ret) { 1830 psp->rap_context.context.initialized = true; 1831 mutex_init(&psp->rap_context.mutex); 1832 } else 1833 return ret; 1834 1835 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); 1836 if (ret || status != TA_RAP_STATUS__SUCCESS) { 1837 psp_rap_terminate(psp); 1838 /* free rap shared memory */ 1839 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); 1840 1841 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", 1842 ret, status); 1843 1844 return ret; 1845 } 1846 1847 return 0; 1848 } 1849 1850 static int psp_rap_terminate(struct psp_context *psp) 1851 { 1852 int ret; 1853 1854 if (!psp->rap_context.context.initialized) 1855 return 0; 1856 1857 ret = psp_ta_unload(psp, &psp->rap_context.context); 1858 1859 psp->rap_context.context.initialized = false; 1860 1861 return ret; 1862 } 1863 1864 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status) 1865 { 1866 struct ta_rap_shared_memory *rap_cmd; 1867 int ret = 0; 1868 1869 if (!psp->rap_context.context.initialized) 1870 return 0; 1871 1872 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE && 1873 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0) 1874 return -EINVAL; 1875 1876 mutex_lock(&psp->rap_context.mutex); 1877 1878 rap_cmd = (struct ta_rap_shared_memory *) 1879 psp->rap_context.context.mem_context.shared_buf; 1880 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory)); 1881 1882 rap_cmd->cmd_id = ta_cmd_id; 1883 rap_cmd->validation_method_id = METHOD_A; 1884 1885 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); 1886 if (ret) 1887 goto out_unlock; 1888 1889 if (status) 1890 *status = rap_cmd->rap_status; 1891 1892 out_unlock: 1893 mutex_unlock(&psp->rap_context.mutex); 1894 1895 return ret; 1896 } 1897 // RAP end 1898 1899 /* securedisplay start */ 1900 static int psp_securedisplay_initialize(struct psp_context *psp) 1901 { 1902 int ret; 1903 struct ta_securedisplay_cmd *securedisplay_cmd; 1904 1905 /* 1906 * TODO: bypass the initialize in sriov for now 1907 */ 1908 if (amdgpu_sriov_vf(psp->adev)) 1909 return 0; 1910 1911 if (!psp->securedisplay_context.context.bin_desc.size_bytes || 1912 !psp->securedisplay_context.context.bin_desc.start_addr) { 1913 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); 1914 return 0; 1915 } 1916 1917 psp->securedisplay_context.context.mem_context.shared_mem_size = 1918 PSP_SECUREDISPLAY_SHARED_MEM_SIZE; 1919 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; 1920 1921 if (!psp->securedisplay_context.context.initialized) { 1922 ret = psp_ta_init_shared_buf(psp, 1923 &psp->securedisplay_context.context.mem_context); 1924 if (ret) 1925 return ret; 1926 } 1927 1928 ret = psp_ta_load(psp, &psp->securedisplay_context.context); 1929 if (!ret) { 1930 psp->securedisplay_context.context.initialized = true; 1931 mutex_init(&psp->securedisplay_context.mutex); 1932 } else 1933 return ret; 1934 1935 mutex_lock(&psp->securedisplay_context.mutex); 1936 1937 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 1938 TA_SECUREDISPLAY_COMMAND__QUERY_TA); 1939 1940 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); 1941 1942 mutex_unlock(&psp->securedisplay_context.mutex); 1943 1944 if (ret) { 1945 psp_securedisplay_terminate(psp); 1946 /* free securedisplay shared memory */ 1947 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); 1948 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); 1949 return -EINVAL; 1950 } 1951 1952 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 1953 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); 1954 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n", 1955 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); 1956 } 1957 1958 return 0; 1959 } 1960 1961 static int psp_securedisplay_terminate(struct psp_context *psp) 1962 { 1963 int ret; 1964 1965 /* 1966 * TODO:bypass the terminate in sriov for now 1967 */ 1968 if (amdgpu_sriov_vf(psp->adev)) 1969 return 0; 1970 1971 if (!psp->securedisplay_context.context.initialized) 1972 return 0; 1973 1974 ret = psp_ta_unload(psp, &psp->securedisplay_context.context); 1975 1976 psp->securedisplay_context.context.initialized = false; 1977 1978 return ret; 1979 } 1980 1981 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1982 { 1983 int ret; 1984 1985 if (!psp->securedisplay_context.context.initialized) 1986 return -EINVAL; 1987 1988 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA && 1989 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC) 1990 return -EINVAL; 1991 1992 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); 1993 1994 return ret; 1995 } 1996 /* SECUREDISPLAY end */ 1997 1998 static int psp_hw_start(struct psp_context *psp) 1999 { 2000 struct amdgpu_device *adev = psp->adev; 2001 int ret; 2002 2003 if (!amdgpu_sriov_vf(adev)) { 2004 if ((is_psp_fw_valid(psp->kdb)) && 2005 (psp->funcs->bootloader_load_kdb != NULL)) { 2006 ret = psp_bootloader_load_kdb(psp); 2007 if (ret) { 2008 DRM_ERROR("PSP load kdb failed!\n"); 2009 return ret; 2010 } 2011 } 2012 2013 if ((is_psp_fw_valid(psp->spl)) && 2014 (psp->funcs->bootloader_load_spl != NULL)) { 2015 ret = psp_bootloader_load_spl(psp); 2016 if (ret) { 2017 DRM_ERROR("PSP load spl failed!\n"); 2018 return ret; 2019 } 2020 } 2021 2022 if ((is_psp_fw_valid(psp->sys)) && 2023 (psp->funcs->bootloader_load_sysdrv != NULL)) { 2024 ret = psp_bootloader_load_sysdrv(psp); 2025 if (ret) { 2026 DRM_ERROR("PSP load sys drv failed!\n"); 2027 return ret; 2028 } 2029 } 2030 2031 if ((is_psp_fw_valid(psp->soc_drv)) && 2032 (psp->funcs->bootloader_load_soc_drv != NULL)) { 2033 ret = psp_bootloader_load_soc_drv(psp); 2034 if (ret) { 2035 DRM_ERROR("PSP load soc drv failed!\n"); 2036 return ret; 2037 } 2038 } 2039 2040 if ((is_psp_fw_valid(psp->intf_drv)) && 2041 (psp->funcs->bootloader_load_intf_drv != NULL)) { 2042 ret = psp_bootloader_load_intf_drv(psp); 2043 if (ret) { 2044 DRM_ERROR("PSP load intf drv failed!\n"); 2045 return ret; 2046 } 2047 } 2048 2049 if ((is_psp_fw_valid(psp->dbg_drv)) && 2050 (psp->funcs->bootloader_load_dbg_drv != NULL)) { 2051 ret = psp_bootloader_load_dbg_drv(psp); 2052 if (ret) { 2053 DRM_ERROR("PSP load dbg drv failed!\n"); 2054 return ret; 2055 } 2056 } 2057 2058 if ((is_psp_fw_valid(psp->ras_drv)) && 2059 (psp->funcs->bootloader_load_ras_drv != NULL)) { 2060 ret = psp_bootloader_load_ras_drv(psp); 2061 if (ret) { 2062 DRM_ERROR("PSP load ras_drv failed!\n"); 2063 return ret; 2064 } 2065 } 2066 2067 if ((is_psp_fw_valid(psp->sos)) && 2068 (psp->funcs->bootloader_load_sos != NULL)) { 2069 ret = psp_bootloader_load_sos(psp); 2070 if (ret) { 2071 DRM_ERROR("PSP load sos failed!\n"); 2072 return ret; 2073 } 2074 } 2075 } 2076 2077 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 2078 if (ret) { 2079 DRM_ERROR("PSP create ring failed!\n"); 2080 return ret; 2081 } 2082 2083 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) 2084 goto skip_pin_bo; 2085 2086 ret = psp_tmr_init(psp); 2087 if (ret) { 2088 DRM_ERROR("PSP tmr init failed!\n"); 2089 return ret; 2090 } 2091 2092 skip_pin_bo: 2093 /* 2094 * For ASICs with DF Cstate management centralized 2095 * to PMFW, TMR setup should be performed after PMFW 2096 * loaded and before other non-psp firmware loaded. 2097 */ 2098 if (psp->pmfw_centralized_cstate_management) { 2099 ret = psp_load_smu_fw(psp); 2100 if (ret) 2101 return ret; 2102 } 2103 2104 ret = psp_tmr_load(psp); 2105 if (ret) { 2106 DRM_ERROR("PSP load tmr failed!\n"); 2107 return ret; 2108 } 2109 2110 return 0; 2111 } 2112 2113 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 2114 enum psp_gfx_fw_type *type) 2115 { 2116 switch (ucode->ucode_id) { 2117 case AMDGPU_UCODE_ID_CAP: 2118 *type = GFX_FW_TYPE_CAP; 2119 break; 2120 case AMDGPU_UCODE_ID_SDMA0: 2121 *type = GFX_FW_TYPE_SDMA0; 2122 break; 2123 case AMDGPU_UCODE_ID_SDMA1: 2124 *type = GFX_FW_TYPE_SDMA1; 2125 break; 2126 case AMDGPU_UCODE_ID_SDMA2: 2127 *type = GFX_FW_TYPE_SDMA2; 2128 break; 2129 case AMDGPU_UCODE_ID_SDMA3: 2130 *type = GFX_FW_TYPE_SDMA3; 2131 break; 2132 case AMDGPU_UCODE_ID_SDMA4: 2133 *type = GFX_FW_TYPE_SDMA4; 2134 break; 2135 case AMDGPU_UCODE_ID_SDMA5: 2136 *type = GFX_FW_TYPE_SDMA5; 2137 break; 2138 case AMDGPU_UCODE_ID_SDMA6: 2139 *type = GFX_FW_TYPE_SDMA6; 2140 break; 2141 case AMDGPU_UCODE_ID_SDMA7: 2142 *type = GFX_FW_TYPE_SDMA7; 2143 break; 2144 case AMDGPU_UCODE_ID_CP_MES: 2145 *type = GFX_FW_TYPE_CP_MES; 2146 break; 2147 case AMDGPU_UCODE_ID_CP_MES_DATA: 2148 *type = GFX_FW_TYPE_MES_STACK; 2149 break; 2150 case AMDGPU_UCODE_ID_CP_MES1: 2151 *type = GFX_FW_TYPE_CP_MES_KIQ; 2152 break; 2153 case AMDGPU_UCODE_ID_CP_MES1_DATA: 2154 *type = GFX_FW_TYPE_MES_KIQ_STACK; 2155 break; 2156 case AMDGPU_UCODE_ID_CP_CE: 2157 *type = GFX_FW_TYPE_CP_CE; 2158 break; 2159 case AMDGPU_UCODE_ID_CP_PFP: 2160 *type = GFX_FW_TYPE_CP_PFP; 2161 break; 2162 case AMDGPU_UCODE_ID_CP_ME: 2163 *type = GFX_FW_TYPE_CP_ME; 2164 break; 2165 case AMDGPU_UCODE_ID_CP_MEC1: 2166 *type = GFX_FW_TYPE_CP_MEC; 2167 break; 2168 case AMDGPU_UCODE_ID_CP_MEC1_JT: 2169 *type = GFX_FW_TYPE_CP_MEC_ME1; 2170 break; 2171 case AMDGPU_UCODE_ID_CP_MEC2: 2172 *type = GFX_FW_TYPE_CP_MEC; 2173 break; 2174 case AMDGPU_UCODE_ID_CP_MEC2_JT: 2175 *type = GFX_FW_TYPE_CP_MEC_ME2; 2176 break; 2177 case AMDGPU_UCODE_ID_RLC_P: 2178 *type = GFX_FW_TYPE_RLC_P; 2179 break; 2180 case AMDGPU_UCODE_ID_RLC_V: 2181 *type = GFX_FW_TYPE_RLC_V; 2182 break; 2183 case AMDGPU_UCODE_ID_RLC_G: 2184 *type = GFX_FW_TYPE_RLC_G; 2185 break; 2186 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 2187 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 2188 break; 2189 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 2190 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 2191 break; 2192 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 2193 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 2194 break; 2195 case AMDGPU_UCODE_ID_RLC_IRAM: 2196 *type = GFX_FW_TYPE_RLC_IRAM; 2197 break; 2198 case AMDGPU_UCODE_ID_RLC_DRAM: 2199 *type = GFX_FW_TYPE_RLC_DRAM_BOOT; 2200 break; 2201 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS: 2202 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS; 2203 break; 2204 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS: 2205 *type = GFX_FW_TYPE_SE0_TAP_DELAYS; 2206 break; 2207 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS: 2208 *type = GFX_FW_TYPE_SE1_TAP_DELAYS; 2209 break; 2210 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS: 2211 *type = GFX_FW_TYPE_SE2_TAP_DELAYS; 2212 break; 2213 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS: 2214 *type = GFX_FW_TYPE_SE3_TAP_DELAYS; 2215 break; 2216 case AMDGPU_UCODE_ID_SMC: 2217 *type = GFX_FW_TYPE_SMU; 2218 break; 2219 case AMDGPU_UCODE_ID_PPTABLE: 2220 *type = GFX_FW_TYPE_PPTABLE; 2221 break; 2222 case AMDGPU_UCODE_ID_UVD: 2223 *type = GFX_FW_TYPE_UVD; 2224 break; 2225 case AMDGPU_UCODE_ID_UVD1: 2226 *type = GFX_FW_TYPE_UVD1; 2227 break; 2228 case AMDGPU_UCODE_ID_VCE: 2229 *type = GFX_FW_TYPE_VCE; 2230 break; 2231 case AMDGPU_UCODE_ID_VCN: 2232 *type = GFX_FW_TYPE_VCN; 2233 break; 2234 case AMDGPU_UCODE_ID_VCN1: 2235 *type = GFX_FW_TYPE_VCN1; 2236 break; 2237 case AMDGPU_UCODE_ID_DMCU_ERAM: 2238 *type = GFX_FW_TYPE_DMCU_ERAM; 2239 break; 2240 case AMDGPU_UCODE_ID_DMCU_INTV: 2241 *type = GFX_FW_TYPE_DMCU_ISR; 2242 break; 2243 case AMDGPU_UCODE_ID_VCN0_RAM: 2244 *type = GFX_FW_TYPE_VCN0_RAM; 2245 break; 2246 case AMDGPU_UCODE_ID_VCN1_RAM: 2247 *type = GFX_FW_TYPE_VCN1_RAM; 2248 break; 2249 case AMDGPU_UCODE_ID_DMCUB: 2250 *type = GFX_FW_TYPE_DMUB; 2251 break; 2252 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0: 2253 *type = GFX_FW_TYPE_SDMA_UCODE_TH0; 2254 break; 2255 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1: 2256 *type = GFX_FW_TYPE_SDMA_UCODE_TH1; 2257 break; 2258 case AMDGPU_UCODE_ID_IMU_I: 2259 *type = GFX_FW_TYPE_IMU_I; 2260 break; 2261 case AMDGPU_UCODE_ID_IMU_D: 2262 *type = GFX_FW_TYPE_IMU_D; 2263 break; 2264 case AMDGPU_UCODE_ID_CP_RS64_PFP: 2265 *type = GFX_FW_TYPE_RS64_PFP; 2266 break; 2267 case AMDGPU_UCODE_ID_CP_RS64_ME: 2268 *type = GFX_FW_TYPE_RS64_ME; 2269 break; 2270 case AMDGPU_UCODE_ID_CP_RS64_MEC: 2271 *type = GFX_FW_TYPE_RS64_MEC; 2272 break; 2273 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK: 2274 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK; 2275 break; 2276 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK: 2277 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK; 2278 break; 2279 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK: 2280 *type = GFX_FW_TYPE_RS64_ME_P0_STACK; 2281 break; 2282 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK: 2283 *type = GFX_FW_TYPE_RS64_ME_P1_STACK; 2284 break; 2285 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK: 2286 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK; 2287 break; 2288 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK: 2289 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK; 2290 break; 2291 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK: 2292 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK; 2293 break; 2294 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK: 2295 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK; 2296 break; 2297 case AMDGPU_UCODE_ID_MAXIMUM: 2298 default: 2299 return -EINVAL; 2300 } 2301 2302 return 0; 2303 } 2304 2305 static void psp_print_fw_hdr(struct psp_context *psp, 2306 struct amdgpu_firmware_info *ucode) 2307 { 2308 struct amdgpu_device *adev = psp->adev; 2309 struct common_firmware_header *hdr; 2310 2311 switch (ucode->ucode_id) { 2312 case AMDGPU_UCODE_ID_SDMA0: 2313 case AMDGPU_UCODE_ID_SDMA1: 2314 case AMDGPU_UCODE_ID_SDMA2: 2315 case AMDGPU_UCODE_ID_SDMA3: 2316 case AMDGPU_UCODE_ID_SDMA4: 2317 case AMDGPU_UCODE_ID_SDMA5: 2318 case AMDGPU_UCODE_ID_SDMA6: 2319 case AMDGPU_UCODE_ID_SDMA7: 2320 hdr = (struct common_firmware_header *) 2321 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 2322 amdgpu_ucode_print_sdma_hdr(hdr); 2323 break; 2324 case AMDGPU_UCODE_ID_CP_CE: 2325 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 2326 amdgpu_ucode_print_gfx_hdr(hdr); 2327 break; 2328 case AMDGPU_UCODE_ID_CP_PFP: 2329 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 2330 amdgpu_ucode_print_gfx_hdr(hdr); 2331 break; 2332 case AMDGPU_UCODE_ID_CP_ME: 2333 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 2334 amdgpu_ucode_print_gfx_hdr(hdr); 2335 break; 2336 case AMDGPU_UCODE_ID_CP_MEC1: 2337 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 2338 amdgpu_ucode_print_gfx_hdr(hdr); 2339 break; 2340 case AMDGPU_UCODE_ID_RLC_G: 2341 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 2342 amdgpu_ucode_print_rlc_hdr(hdr); 2343 break; 2344 case AMDGPU_UCODE_ID_SMC: 2345 hdr = (struct common_firmware_header *)adev->pm.fw->data; 2346 amdgpu_ucode_print_smc_hdr(hdr); 2347 break; 2348 default: 2349 break; 2350 } 2351 } 2352 2353 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 2354 struct psp_gfx_cmd_resp *cmd) 2355 { 2356 int ret; 2357 uint64_t fw_mem_mc_addr = ucode->mc_addr; 2358 2359 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 2360 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 2361 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 2362 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 2363 2364 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 2365 if (ret) 2366 DRM_ERROR("Unknown firmware type\n"); 2367 2368 return ret; 2369 } 2370 2371 static int psp_execute_non_psp_fw_load(struct psp_context *psp, 2372 struct amdgpu_firmware_info *ucode) 2373 { 2374 int ret = 0; 2375 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2376 2377 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd); 2378 if (!ret) { 2379 ret = psp_cmd_submit_buf(psp, ucode, cmd, 2380 psp->fence_buf_mc_addr); 2381 } 2382 2383 release_psp_cmd_buf(psp); 2384 2385 return ret; 2386 } 2387 2388 static int psp_load_smu_fw(struct psp_context *psp) 2389 { 2390 int ret; 2391 struct amdgpu_device *adev = psp->adev; 2392 struct amdgpu_firmware_info *ucode = 2393 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 2394 struct amdgpu_ras *ras = psp->ras_context.ras; 2395 2396 /* 2397 * Skip SMU FW reloading in case of using BACO for runpm only, 2398 * as SMU is always alive. 2399 */ 2400 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) 2401 return 0; 2402 2403 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2404 return 0; 2405 2406 if ((amdgpu_in_reset(adev) && 2407 ras && adev->ras_enabled && 2408 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || 2409 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { 2410 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2411 if (ret) { 2412 DRM_WARN("Failed to set MP1 state prepare for reload\n"); 2413 } 2414 } 2415 2416 ret = psp_execute_non_psp_fw_load(psp, ucode); 2417 2418 if (ret) 2419 DRM_ERROR("PSP load smu failed!\n"); 2420 2421 return ret; 2422 } 2423 2424 static bool fw_load_skip_check(struct psp_context *psp, 2425 struct amdgpu_firmware_info *ucode) 2426 { 2427 if (!ucode->fw || !ucode->ucode_size) 2428 return true; 2429 2430 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2431 (psp_smu_reload_quirk(psp) || 2432 psp->autoload_supported || 2433 psp->pmfw_centralized_cstate_management)) 2434 return true; 2435 2436 if (amdgpu_sriov_vf(psp->adev) && 2437 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id)) 2438 return true; 2439 2440 if (psp->autoload_supported && 2441 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 2442 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 2443 /* skip mec JT when autoload is enabled */ 2444 return true; 2445 2446 return false; 2447 } 2448 2449 int psp_load_fw_list(struct psp_context *psp, 2450 struct amdgpu_firmware_info **ucode_list, int ucode_count) 2451 { 2452 int ret = 0, i; 2453 struct amdgpu_firmware_info *ucode; 2454 2455 for (i = 0; i < ucode_count; ++i) { 2456 ucode = ucode_list[i]; 2457 psp_print_fw_hdr(psp, ucode); 2458 ret = psp_execute_non_psp_fw_load(psp, ucode); 2459 if (ret) 2460 return ret; 2461 } 2462 return ret; 2463 } 2464 2465 static int psp_load_non_psp_fw(struct psp_context *psp) 2466 { 2467 int i, ret; 2468 struct amdgpu_firmware_info *ucode; 2469 struct amdgpu_device *adev = psp->adev; 2470 2471 if (psp->autoload_supported && 2472 !psp->pmfw_centralized_cstate_management) { 2473 ret = psp_load_smu_fw(psp); 2474 if (ret) 2475 return ret; 2476 } 2477 2478 for (i = 0; i < adev->firmware.max_ucodes; i++) { 2479 ucode = &adev->firmware.ucode[i]; 2480 2481 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 2482 !fw_load_skip_check(psp, ucode)) { 2483 ret = psp_load_smu_fw(psp); 2484 if (ret) 2485 return ret; 2486 continue; 2487 } 2488 2489 if (fw_load_skip_check(psp, ucode)) 2490 continue; 2491 2492 if (psp->autoload_supported && 2493 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) || 2494 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) || 2495 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) && 2496 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2497 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2498 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) 2499 /* PSP only receive one SDMA fw for sienna_cichlid, 2500 * as all four sdma fw are same */ 2501 continue; 2502 2503 psp_print_fw_hdr(psp, ucode); 2504 2505 ret = psp_execute_non_psp_fw_load(psp, ucode); 2506 if (ret) 2507 return ret; 2508 2509 /* Start rlc autoload after psp recieved all the gfx firmware */ 2510 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 2511 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { 2512 ret = psp_rlc_autoload_start(psp); 2513 if (ret) { 2514 DRM_ERROR("Failed to start rlc autoload\n"); 2515 return ret; 2516 } 2517 } 2518 } 2519 2520 return 0; 2521 } 2522 2523 static int psp_load_fw(struct amdgpu_device *adev) 2524 { 2525 int ret; 2526 struct psp_context *psp = &adev->psp; 2527 2528 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2529 /* should not destroy ring, only stop */ 2530 psp_ring_stop(psp, PSP_RING_TYPE__KM); 2531 } else { 2532 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 2533 2534 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 2535 if (ret) { 2536 DRM_ERROR("PSP ring init failed!\n"); 2537 goto failed; 2538 } 2539 } 2540 2541 ret = psp_hw_start(psp); 2542 if (ret) 2543 goto failed; 2544 2545 ret = psp_load_non_psp_fw(psp); 2546 if (ret) 2547 goto failed1; 2548 2549 ret = psp_asd_initialize(psp); 2550 if (ret) { 2551 DRM_ERROR("PSP load asd failed!\n"); 2552 goto failed1; 2553 } 2554 2555 ret = psp_rl_load(adev); 2556 if (ret) { 2557 DRM_ERROR("PSP load RL failed!\n"); 2558 goto failed1; 2559 } 2560 2561 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) { 2562 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2563 ret = psp_xgmi_initialize(psp, false, true); 2564 /* Warning the XGMI seesion initialize failure 2565 * Instead of stop driver initialization 2566 */ 2567 if (ret) 2568 dev_err(psp->adev->dev, 2569 "XGMI: Failed to initialize XGMI session\n"); 2570 } 2571 } 2572 2573 if (psp->ta_fw) { 2574 ret = psp_ras_initialize(psp); 2575 if (ret) 2576 dev_err(psp->adev->dev, 2577 "RAS: Failed to initialize RAS\n"); 2578 2579 ret = psp_hdcp_initialize(psp); 2580 if (ret) 2581 dev_err(psp->adev->dev, 2582 "HDCP: Failed to initialize HDCP\n"); 2583 2584 ret = psp_dtm_initialize(psp); 2585 if (ret) 2586 dev_err(psp->adev->dev, 2587 "DTM: Failed to initialize DTM\n"); 2588 2589 ret = psp_rap_initialize(psp); 2590 if (ret) 2591 dev_err(psp->adev->dev, 2592 "RAP: Failed to initialize RAP\n"); 2593 2594 ret = psp_securedisplay_initialize(psp); 2595 if (ret) 2596 dev_err(psp->adev->dev, 2597 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2598 } 2599 2600 return 0; 2601 2602 failed1: 2603 psp_free_shared_bufs(psp); 2604 failed: 2605 /* 2606 * all cleanup jobs (xgmi terminate, ras terminate, 2607 * ring destroy, cmd/fence/fw buffers destory, 2608 * psp->cmd destory) are delayed to psp_hw_fini 2609 */ 2610 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2611 return ret; 2612 } 2613 2614 static int psp_hw_init(void *handle) 2615 { 2616 int ret; 2617 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2618 2619 mutex_lock(&adev->firmware.mutex); 2620 /* 2621 * This sequence is just used on hw_init only once, no need on 2622 * resume. 2623 */ 2624 ret = amdgpu_ucode_init_bo(adev); 2625 if (ret) 2626 goto failed; 2627 2628 ret = psp_load_fw(adev); 2629 if (ret) { 2630 DRM_ERROR("PSP firmware loading failed\n"); 2631 goto failed; 2632 } 2633 2634 mutex_unlock(&adev->firmware.mutex); 2635 return 0; 2636 2637 failed: 2638 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; 2639 mutex_unlock(&adev->firmware.mutex); 2640 return -EINVAL; 2641 } 2642 2643 static int psp_hw_fini(void *handle) 2644 { 2645 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2646 struct psp_context *psp = &adev->psp; 2647 2648 if (psp->ta_fw) { 2649 psp_ras_terminate(psp); 2650 psp_securedisplay_terminate(psp); 2651 psp_rap_terminate(psp); 2652 psp_dtm_terminate(psp); 2653 psp_hdcp_terminate(psp); 2654 2655 if (adev->gmc.xgmi.num_physical_nodes > 1) 2656 psp_xgmi_terminate(psp); 2657 } 2658 2659 psp_asd_terminate(psp); 2660 psp_tmr_terminate(psp); 2661 2662 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 2663 2664 psp_free_shared_bufs(psp); 2665 2666 return 0; 2667 } 2668 2669 static int psp_suspend(void *handle) 2670 { 2671 int ret = 0; 2672 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2673 struct psp_context *psp = &adev->psp; 2674 2675 if (adev->gmc.xgmi.num_physical_nodes > 1 && 2676 psp->xgmi_context.context.initialized) { 2677 ret = psp_xgmi_terminate(psp); 2678 if (ret) { 2679 DRM_ERROR("Failed to terminate xgmi ta\n"); 2680 goto out; 2681 } 2682 } 2683 2684 if (psp->ta_fw) { 2685 ret = psp_ras_terminate(psp); 2686 if (ret) { 2687 DRM_ERROR("Failed to terminate ras ta\n"); 2688 goto out; 2689 } 2690 ret = psp_hdcp_terminate(psp); 2691 if (ret) { 2692 DRM_ERROR("Failed to terminate hdcp ta\n"); 2693 goto out; 2694 } 2695 ret = psp_dtm_terminate(psp); 2696 if (ret) { 2697 DRM_ERROR("Failed to terminate dtm ta\n"); 2698 goto out; 2699 } 2700 ret = psp_rap_terminate(psp); 2701 if (ret) { 2702 DRM_ERROR("Failed to terminate rap ta\n"); 2703 goto out; 2704 } 2705 ret = psp_securedisplay_terminate(psp); 2706 if (ret) { 2707 DRM_ERROR("Failed to terminate securedisplay ta\n"); 2708 goto out; 2709 } 2710 } 2711 2712 ret = psp_asd_terminate(psp); 2713 if (ret) { 2714 DRM_ERROR("Failed to terminate asd\n"); 2715 goto out; 2716 } 2717 2718 ret = psp_tmr_terminate(psp); 2719 if (ret) { 2720 DRM_ERROR("Failed to terminate tmr\n"); 2721 goto out; 2722 } 2723 2724 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); 2725 if (ret) { 2726 DRM_ERROR("PSP ring stop failed\n"); 2727 } 2728 2729 out: 2730 return ret; 2731 } 2732 2733 static int psp_resume(void *handle) 2734 { 2735 int ret; 2736 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2737 struct psp_context *psp = &adev->psp; 2738 2739 DRM_INFO("PSP is resuming...\n"); 2740 2741 if (psp->mem_train_ctx.enable_mem_training) { 2742 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2743 if (ret) { 2744 DRM_ERROR("Failed to process memory training!\n"); 2745 return ret; 2746 } 2747 } 2748 2749 mutex_lock(&adev->firmware.mutex); 2750 2751 ret = psp_hw_start(psp); 2752 if (ret) 2753 goto failed; 2754 2755 ret = psp_load_non_psp_fw(psp); 2756 if (ret) 2757 goto failed; 2758 2759 ret = psp_asd_initialize(psp); 2760 if (ret) { 2761 DRM_ERROR("PSP load asd failed!\n"); 2762 goto failed; 2763 } 2764 2765 ret = psp_rl_load(adev); 2766 if (ret) { 2767 dev_err(adev->dev, "PSP load RL failed!\n"); 2768 goto failed; 2769 } 2770 2771 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2772 ret = psp_xgmi_initialize(psp, false, true); 2773 /* Warning the XGMI seesion initialize failure 2774 * Instead of stop driver initialization 2775 */ 2776 if (ret) 2777 dev_err(psp->adev->dev, 2778 "XGMI: Failed to initialize XGMI session\n"); 2779 } 2780 2781 if (psp->ta_fw) { 2782 ret = psp_ras_initialize(psp); 2783 if (ret) 2784 dev_err(psp->adev->dev, 2785 "RAS: Failed to initialize RAS\n"); 2786 2787 ret = psp_hdcp_initialize(psp); 2788 if (ret) 2789 dev_err(psp->adev->dev, 2790 "HDCP: Failed to initialize HDCP\n"); 2791 2792 ret = psp_dtm_initialize(psp); 2793 if (ret) 2794 dev_err(psp->adev->dev, 2795 "DTM: Failed to initialize DTM\n"); 2796 2797 ret = psp_rap_initialize(psp); 2798 if (ret) 2799 dev_err(psp->adev->dev, 2800 "RAP: Failed to initialize RAP\n"); 2801 2802 ret = psp_securedisplay_initialize(psp); 2803 if (ret) 2804 dev_err(psp->adev->dev, 2805 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n"); 2806 } 2807 2808 mutex_unlock(&adev->firmware.mutex); 2809 2810 return 0; 2811 2812 failed: 2813 DRM_ERROR("PSP resume failed\n"); 2814 mutex_unlock(&adev->firmware.mutex); 2815 return ret; 2816 } 2817 2818 int psp_gpu_reset(struct amdgpu_device *adev) 2819 { 2820 int ret; 2821 2822 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 2823 return 0; 2824 2825 mutex_lock(&adev->psp.mutex); 2826 ret = psp_mode1_reset(&adev->psp); 2827 mutex_unlock(&adev->psp.mutex); 2828 2829 return ret; 2830 } 2831 2832 int psp_rlc_autoload_start(struct psp_context *psp) 2833 { 2834 int ret; 2835 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); 2836 2837 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC; 2838 2839 ret = psp_cmd_submit_buf(psp, NULL, cmd, 2840 psp->fence_buf_mc_addr); 2841 2842 release_psp_cmd_buf(psp); 2843 2844 return ret; 2845 } 2846 2847 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx, 2848 uint64_t cmd_gpu_addr, int cmd_size) 2849 { 2850 struct amdgpu_firmware_info ucode = {0}; 2851 2852 ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : 2853 AMDGPU_UCODE_ID_VCN0_RAM; 2854 ucode.mc_addr = cmd_gpu_addr; 2855 ucode.ucode_size = cmd_size; 2856 2857 return psp_execute_non_psp_fw_load(&adev->psp, &ucode); 2858 } 2859 2860 int psp_ring_cmd_submit(struct psp_context *psp, 2861 uint64_t cmd_buf_mc_addr, 2862 uint64_t fence_mc_addr, 2863 int index) 2864 { 2865 unsigned int psp_write_ptr_reg = 0; 2866 struct psp_gfx_rb_frame *write_frame; 2867 struct psp_ring *ring = &psp->km_ring; 2868 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 2869 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 2870 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 2871 struct amdgpu_device *adev = psp->adev; 2872 uint32_t ring_size_dw = ring->ring_size / 4; 2873 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 2874 2875 /* KM (GPCOM) prepare write pointer */ 2876 psp_write_ptr_reg = psp_ring_get_wptr(psp); 2877 2878 /* Update KM RB frame pointer to new frame */ 2879 /* write_frame ptr increments by size of rb_frame in bytes */ 2880 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 2881 if ((psp_write_ptr_reg % ring_size_dw) == 0) 2882 write_frame = ring_buffer_start; 2883 else 2884 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 2885 /* Check invalid write_frame ptr address */ 2886 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 2887 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 2888 ring_buffer_start, ring_buffer_end, write_frame); 2889 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 2890 return -EINVAL; 2891 } 2892 2893 /* Initialize KM RB frame */ 2894 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 2895 2896 /* Update KM RB frame */ 2897 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 2898 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 2899 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 2900 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 2901 write_frame->fence_value = index; 2902 amdgpu_device_flush_hdp(adev, NULL); 2903 2904 /* Update the write Pointer in DWORDs */ 2905 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 2906 psp_ring_set_wptr(psp, psp_write_ptr_reg); 2907 return 0; 2908 } 2909 2910 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name) 2911 { 2912 struct amdgpu_device *adev = psp->adev; 2913 char fw_name[PSP_FW_NAME_LEN]; 2914 const struct psp_firmware_header_v1_0 *asd_hdr; 2915 int err = 0; 2916 2917 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 2918 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name); 2919 if (err) 2920 goto out; 2921 2922 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 2923 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 2924 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 2925 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 2926 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + 2927 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 2928 return 0; 2929 out: 2930 amdgpu_ucode_release(&adev->psp.asd_fw); 2931 return err; 2932 } 2933 2934 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name) 2935 { 2936 struct amdgpu_device *adev = psp->adev; 2937 char fw_name[PSP_FW_NAME_LEN]; 2938 const struct psp_firmware_header_v1_0 *toc_hdr; 2939 int err = 0; 2940 2941 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name); 2942 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name); 2943 if (err) 2944 goto out; 2945 2946 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 2947 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 2948 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 2949 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 2950 adev->psp.toc.start_addr = (uint8_t *)toc_hdr + 2951 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); 2952 return 0; 2953 out: 2954 amdgpu_ucode_release(&adev->psp.toc_fw); 2955 return err; 2956 } 2957 2958 static int parse_sos_bin_descriptor(struct psp_context *psp, 2959 const struct psp_fw_bin_desc *desc, 2960 const struct psp_firmware_header_v2_0 *sos_hdr) 2961 { 2962 uint8_t *ucode_start_addr = NULL; 2963 2964 if (!psp || !desc || !sos_hdr) 2965 return -EINVAL; 2966 2967 ucode_start_addr = (uint8_t *)sos_hdr + 2968 le32_to_cpu(desc->offset_bytes) + 2969 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 2970 2971 switch (desc->fw_type) { 2972 case PSP_FW_TYPE_PSP_SOS: 2973 psp->sos.fw_version = le32_to_cpu(desc->fw_version); 2974 psp->sos.feature_version = le32_to_cpu(desc->fw_version); 2975 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes); 2976 psp->sos.start_addr = ucode_start_addr; 2977 break; 2978 case PSP_FW_TYPE_PSP_SYS_DRV: 2979 psp->sys.fw_version = le32_to_cpu(desc->fw_version); 2980 psp->sys.feature_version = le32_to_cpu(desc->fw_version); 2981 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes); 2982 psp->sys.start_addr = ucode_start_addr; 2983 break; 2984 case PSP_FW_TYPE_PSP_KDB: 2985 psp->kdb.fw_version = le32_to_cpu(desc->fw_version); 2986 psp->kdb.feature_version = le32_to_cpu(desc->fw_version); 2987 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes); 2988 psp->kdb.start_addr = ucode_start_addr; 2989 break; 2990 case PSP_FW_TYPE_PSP_TOC: 2991 psp->toc.fw_version = le32_to_cpu(desc->fw_version); 2992 psp->toc.feature_version = le32_to_cpu(desc->fw_version); 2993 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes); 2994 psp->toc.start_addr = ucode_start_addr; 2995 break; 2996 case PSP_FW_TYPE_PSP_SPL: 2997 psp->spl.fw_version = le32_to_cpu(desc->fw_version); 2998 psp->spl.feature_version = le32_to_cpu(desc->fw_version); 2999 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes); 3000 psp->spl.start_addr = ucode_start_addr; 3001 break; 3002 case PSP_FW_TYPE_PSP_RL: 3003 psp->rl.fw_version = le32_to_cpu(desc->fw_version); 3004 psp->rl.feature_version = le32_to_cpu(desc->fw_version); 3005 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes); 3006 psp->rl.start_addr = ucode_start_addr; 3007 break; 3008 case PSP_FW_TYPE_PSP_SOC_DRV: 3009 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version); 3010 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version); 3011 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3012 psp->soc_drv.start_addr = ucode_start_addr; 3013 break; 3014 case PSP_FW_TYPE_PSP_INTF_DRV: 3015 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version); 3016 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version); 3017 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3018 psp->intf_drv.start_addr = ucode_start_addr; 3019 break; 3020 case PSP_FW_TYPE_PSP_DBG_DRV: 3021 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version); 3022 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version); 3023 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3024 psp->dbg_drv.start_addr = ucode_start_addr; 3025 break; 3026 case PSP_FW_TYPE_PSP_RAS_DRV: 3027 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version); 3028 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3029 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3030 psp->ras_drv.start_addr = ucode_start_addr; 3031 break; 3032 default: 3033 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); 3034 break; 3035 } 3036 3037 return 0; 3038 } 3039 3040 static int psp_init_sos_base_fw(struct amdgpu_device *adev) 3041 { 3042 const struct psp_firmware_header_v1_0 *sos_hdr; 3043 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3044 uint8_t *ucode_array_start_addr; 3045 3046 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3047 ucode_array_start_addr = (uint8_t *)sos_hdr + 3048 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3049 3050 if (adev->gmc.xgmi.connected_to_cpu || 3051 (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) { 3052 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3053 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3054 3055 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes); 3056 adev->psp.sys.start_addr = ucode_array_start_addr; 3057 3058 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes); 3059 adev->psp.sos.start_addr = ucode_array_start_addr + 3060 le32_to_cpu(sos_hdr->sos.offset_bytes); 3061 } else { 3062 /* Load alternate PSP SOS FW */ 3063 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3064 3065 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3066 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 3067 3068 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 3069 adev->psp.sys.start_addr = ucode_array_start_addr + 3070 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 3071 3072 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 3073 adev->psp.sos.start_addr = ucode_array_start_addr + 3074 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 3075 } 3076 3077 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) { 3078 dev_warn(adev->dev, "PSP SOS FW not available"); 3079 return -EINVAL; 3080 } 3081 3082 return 0; 3083 } 3084 3085 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) 3086 { 3087 struct amdgpu_device *adev = psp->adev; 3088 char fw_name[PSP_FW_NAME_LEN]; 3089 const struct psp_firmware_header_v1_0 *sos_hdr; 3090 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; 3091 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3092 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3093 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; 3094 int err = 0; 3095 uint8_t *ucode_array_start_addr; 3096 int fw_index = 0; 3097 3098 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name); 3099 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name); 3100 if (err) 3101 goto out; 3102 3103 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 3104 ucode_array_start_addr = (uint8_t *)sos_hdr + 3105 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3106 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3107 3108 switch (sos_hdr->header.header_version_major) { 3109 case 1: 3110 err = psp_init_sos_base_fw(adev); 3111 if (err) 3112 goto out; 3113 3114 if (sos_hdr->header.header_version_minor == 1) { 3115 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3116 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3117 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3118 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 3119 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3120 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3121 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3122 } 3123 if (sos_hdr->header.header_version_minor == 2) { 3124 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3125 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3126 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr + 3127 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3128 } 3129 if (sos_hdr->header.header_version_minor == 3) { 3130 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3131 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 3132 adev->psp.toc.start_addr = ucode_array_start_addr + 3133 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 3134 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 3135 adev->psp.kdb.start_addr = ucode_array_start_addr + 3136 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 3137 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 3138 adev->psp.spl.start_addr = ucode_array_start_addr + 3139 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 3140 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 3141 adev->psp.rl.start_addr = ucode_array_start_addr + 3142 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3143 } 3144 break; 3145 case 2: 3146 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; 3147 3148 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3149 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); 3150 err = -EINVAL; 3151 goto out; 3152 } 3153 3154 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { 3155 err = parse_sos_bin_descriptor(psp, 3156 &sos_hdr_v2_0->psp_fw_bin[fw_index], 3157 sos_hdr_v2_0); 3158 if (err) 3159 goto out; 3160 } 3161 break; 3162 default: 3163 dev_err(adev->dev, 3164 "unsupported psp sos firmware\n"); 3165 err = -EINVAL; 3166 goto out; 3167 } 3168 3169 return 0; 3170 out: 3171 amdgpu_ucode_release(&adev->psp.sos_fw); 3172 3173 return err; 3174 } 3175 3176 static int parse_ta_bin_descriptor(struct psp_context *psp, 3177 const struct psp_fw_bin_desc *desc, 3178 const struct ta_firmware_header_v2_0 *ta_hdr) 3179 { 3180 uint8_t *ucode_start_addr = NULL; 3181 3182 if (!psp || !desc || !ta_hdr) 3183 return -EINVAL; 3184 3185 ucode_start_addr = (uint8_t *)ta_hdr + 3186 le32_to_cpu(desc->offset_bytes) + 3187 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3188 3189 switch (desc->fw_type) { 3190 case TA_FW_TYPE_PSP_ASD: 3191 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3192 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version); 3193 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3194 psp->asd_context.bin_desc.start_addr = ucode_start_addr; 3195 break; 3196 case TA_FW_TYPE_PSP_XGMI: 3197 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3198 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3199 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr; 3200 break; 3201 case TA_FW_TYPE_PSP_RAS: 3202 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3203 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3204 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr; 3205 break; 3206 case TA_FW_TYPE_PSP_HDCP: 3207 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3208 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3209 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr; 3210 break; 3211 case TA_FW_TYPE_PSP_DTM: 3212 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3213 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3214 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr; 3215 break; 3216 case TA_FW_TYPE_PSP_RAP: 3217 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version); 3218 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes); 3219 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr; 3220 break; 3221 case TA_FW_TYPE_PSP_SECUREDISPLAY: 3222 psp->securedisplay_context.context.bin_desc.fw_version = 3223 le32_to_cpu(desc->fw_version); 3224 psp->securedisplay_context.context.bin_desc.size_bytes = 3225 le32_to_cpu(desc->size_bytes); 3226 psp->securedisplay_context.context.bin_desc.start_addr = 3227 ucode_start_addr; 3228 break; 3229 default: 3230 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type); 3231 break; 3232 } 3233 3234 return 0; 3235 } 3236 3237 static int parse_ta_v1_microcode(struct psp_context *psp) 3238 { 3239 const struct ta_firmware_header_v1_0 *ta_hdr; 3240 struct amdgpu_device *adev = psp->adev; 3241 3242 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; 3243 3244 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1) 3245 return -EINVAL; 3246 3247 adev->psp.xgmi_context.context.bin_desc.fw_version = 3248 le32_to_cpu(ta_hdr->xgmi.fw_version); 3249 adev->psp.xgmi_context.context.bin_desc.size_bytes = 3250 le32_to_cpu(ta_hdr->xgmi.size_bytes); 3251 adev->psp.xgmi_context.context.bin_desc.start_addr = 3252 (uint8_t *)ta_hdr + 3253 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3254 3255 adev->psp.ras_context.context.bin_desc.fw_version = 3256 le32_to_cpu(ta_hdr->ras.fw_version); 3257 adev->psp.ras_context.context.bin_desc.size_bytes = 3258 le32_to_cpu(ta_hdr->ras.size_bytes); 3259 adev->psp.ras_context.context.bin_desc.start_addr = 3260 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + 3261 le32_to_cpu(ta_hdr->ras.offset_bytes); 3262 3263 adev->psp.hdcp_context.context.bin_desc.fw_version = 3264 le32_to_cpu(ta_hdr->hdcp.fw_version); 3265 adev->psp.hdcp_context.context.bin_desc.size_bytes = 3266 le32_to_cpu(ta_hdr->hdcp.size_bytes); 3267 adev->psp.hdcp_context.context.bin_desc.start_addr = 3268 (uint8_t *)ta_hdr + 3269 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 3270 3271 adev->psp.dtm_context.context.bin_desc.fw_version = 3272 le32_to_cpu(ta_hdr->dtm.fw_version); 3273 adev->psp.dtm_context.context.bin_desc.size_bytes = 3274 le32_to_cpu(ta_hdr->dtm.size_bytes); 3275 adev->psp.dtm_context.context.bin_desc.start_addr = 3276 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3277 le32_to_cpu(ta_hdr->dtm.offset_bytes); 3278 3279 adev->psp.securedisplay_context.context.bin_desc.fw_version = 3280 le32_to_cpu(ta_hdr->securedisplay.fw_version); 3281 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 3282 le32_to_cpu(ta_hdr->securedisplay.size_bytes); 3283 adev->psp.securedisplay_context.context.bin_desc.start_addr = 3284 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + 3285 le32_to_cpu(ta_hdr->securedisplay.offset_bytes); 3286 3287 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); 3288 3289 return 0; 3290 } 3291 3292 static int parse_ta_v2_microcode(struct psp_context *psp) 3293 { 3294 const struct ta_firmware_header_v2_0 *ta_hdr; 3295 struct amdgpu_device *adev = psp->adev; 3296 int err = 0; 3297 int ta_index = 0; 3298 3299 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data; 3300 3301 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) 3302 return -EINVAL; 3303 3304 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { 3305 dev_err(adev->dev, "packed TA count exceeds maximum limit\n"); 3306 return -EINVAL; 3307 } 3308 3309 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) { 3310 err = parse_ta_bin_descriptor(psp, 3311 &ta_hdr->ta_fw_bin[ta_index], 3312 ta_hdr); 3313 if (err) 3314 return err; 3315 } 3316 3317 return 0; 3318 } 3319 3320 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) 3321 { 3322 const struct common_firmware_header *hdr; 3323 struct amdgpu_device *adev = psp->adev; 3324 char fw_name[PSP_FW_NAME_LEN]; 3325 int err; 3326 3327 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 3328 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name); 3329 if (err) 3330 return err; 3331 3332 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data; 3333 switch (le16_to_cpu(hdr->header_version_major)) { 3334 case 1: 3335 err = parse_ta_v1_microcode(psp); 3336 break; 3337 case 2: 3338 err = parse_ta_v2_microcode(psp); 3339 break; 3340 default: 3341 dev_err(adev->dev, "unsupported TA header version\n"); 3342 err = -EINVAL; 3343 } 3344 3345 if (err) 3346 amdgpu_ucode_release(&adev->psp.ta_fw); 3347 3348 return err; 3349 } 3350 3351 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name) 3352 { 3353 struct amdgpu_device *adev = psp->adev; 3354 char fw_name[PSP_FW_NAME_LEN]; 3355 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; 3356 struct amdgpu_firmware_info *info = NULL; 3357 int err = 0; 3358 3359 if (!amdgpu_sriov_vf(adev)) { 3360 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); 3361 return -EINVAL; 3362 } 3363 3364 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); 3365 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name); 3366 if (err) { 3367 if (err == -ENODEV) { 3368 dev_warn(adev->dev, "cap microcode does not exist, skip\n"); 3369 err = 0; 3370 goto out; 3371 } 3372 dev_err(adev->dev, "fail to initialize cap microcode\n"); 3373 } 3374 3375 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; 3376 info->ucode_id = AMDGPU_UCODE_ID_CAP; 3377 info->fw = adev->psp.cap_fw; 3378 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) 3379 adev->psp.cap_fw->data; 3380 adev->firmware.fw_size += ALIGN( 3381 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); 3382 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); 3383 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); 3384 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); 3385 3386 return 0; 3387 3388 out: 3389 amdgpu_ucode_release(&adev->psp.cap_fw); 3390 return err; 3391 } 3392 3393 static int psp_set_clockgating_state(void *handle, 3394 enum amd_clockgating_state state) 3395 { 3396 return 0; 3397 } 3398 3399 static int psp_set_powergating_state(void *handle, 3400 enum amd_powergating_state state) 3401 { 3402 return 0; 3403 } 3404 3405 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 3406 struct device_attribute *attr, 3407 char *buf) 3408 { 3409 struct drm_device *ddev = dev_get_drvdata(dev); 3410 struct amdgpu_device *adev = drm_to_adev(ddev); 3411 uint32_t fw_ver; 3412 int ret; 3413 3414 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3415 DRM_INFO("PSP block is not ready yet."); 3416 return -EBUSY; 3417 } 3418 3419 mutex_lock(&adev->psp.mutex); 3420 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 3421 mutex_unlock(&adev->psp.mutex); 3422 3423 if (ret) { 3424 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 3425 return ret; 3426 } 3427 3428 return sysfs_emit(buf, "%x\n", fw_ver); 3429 } 3430 3431 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 3432 struct device_attribute *attr, 3433 const char *buf, 3434 size_t count) 3435 { 3436 struct drm_device *ddev = dev_get_drvdata(dev); 3437 struct amdgpu_device *adev = drm_to_adev(ddev); 3438 int ret, idx; 3439 char fw_name[100]; 3440 const struct firmware *usbc_pd_fw; 3441 struct amdgpu_bo *fw_buf_bo = NULL; 3442 uint64_t fw_pri_mc_addr; 3443 void *fw_pri_cpu_addr; 3444 3445 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 3446 DRM_INFO("PSP block is not ready yet."); 3447 return -EBUSY; 3448 } 3449 3450 if (!drm_dev_enter(ddev, &idx)) 3451 return -ENODEV; 3452 3453 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 3454 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 3455 if (ret) 3456 goto fail; 3457 3458 /* LFB address which is aligned to 1MB boundary per PSP request */ 3459 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3460 AMDGPU_GEM_DOMAIN_VRAM | 3461 AMDGPU_GEM_DOMAIN_GTT, 3462 &fw_buf_bo, &fw_pri_mc_addr, 3463 &fw_pri_cpu_addr); 3464 if (ret) 3465 goto rel_buf; 3466 3467 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 3468 3469 mutex_lock(&adev->psp.mutex); 3470 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr); 3471 mutex_unlock(&adev->psp.mutex); 3472 3473 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3474 3475 rel_buf: 3476 release_firmware(usbc_pd_fw); 3477 fail: 3478 if (ret) { 3479 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 3480 count = ret; 3481 } 3482 3483 drm_dev_exit(idx); 3484 return count; 3485 } 3486 3487 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) 3488 { 3489 int idx; 3490 3491 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) 3492 return; 3493 3494 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 3495 memcpy(psp->fw_pri_buf, start_addr, bin_size); 3496 3497 drm_dev_exit(idx); 3498 } 3499 3500 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 3501 psp_usbc_pd_fw_sysfs_read, 3502 psp_usbc_pd_fw_sysfs_write); 3503 3504 int is_psp_fw_valid(struct psp_bin_desc bin) 3505 { 3506 return bin.size_bytes; 3507 } 3508 3509 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj, 3510 struct bin_attribute *bin_attr, 3511 char *buffer, loff_t pos, size_t count) 3512 { 3513 struct device *dev = kobj_to_dev(kobj); 3514 struct drm_device *ddev = dev_get_drvdata(dev); 3515 struct amdgpu_device *adev = drm_to_adev(ddev); 3516 3517 adev->psp.vbflash_done = false; 3518 3519 /* Safeguard against memory drain */ 3520 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) { 3521 dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B); 3522 kvfree(adev->psp.vbflash_tmp_buf); 3523 adev->psp.vbflash_tmp_buf = NULL; 3524 adev->psp.vbflash_image_size = 0; 3525 return -ENOMEM; 3526 } 3527 3528 /* TODO Just allocate max for now and optimize to realloc later if needed */ 3529 if (!adev->psp.vbflash_tmp_buf) { 3530 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL); 3531 if (!adev->psp.vbflash_tmp_buf) 3532 return -ENOMEM; 3533 } 3534 3535 mutex_lock(&adev->psp.mutex); 3536 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count); 3537 adev->psp.vbflash_image_size += count; 3538 mutex_unlock(&adev->psp.mutex); 3539 3540 dev_info(adev->dev, "VBIOS flash write PSP done"); 3541 3542 return count; 3543 } 3544 3545 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj, 3546 struct bin_attribute *bin_attr, char *buffer, 3547 loff_t pos, size_t count) 3548 { 3549 struct device *dev = kobj_to_dev(kobj); 3550 struct drm_device *ddev = dev_get_drvdata(dev); 3551 struct amdgpu_device *adev = drm_to_adev(ddev); 3552 struct amdgpu_bo *fw_buf_bo = NULL; 3553 uint64_t fw_pri_mc_addr; 3554 void *fw_pri_cpu_addr; 3555 int ret; 3556 3557 dev_info(adev->dev, "VBIOS flash to PSP started"); 3558 3559 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size, 3560 AMDGPU_GPU_PAGE_SIZE, 3561 AMDGPU_GEM_DOMAIN_VRAM, 3562 &fw_buf_bo, 3563 &fw_pri_mc_addr, 3564 &fw_pri_cpu_addr); 3565 if (ret) 3566 goto rel_buf; 3567 3568 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size); 3569 3570 mutex_lock(&adev->psp.mutex); 3571 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr); 3572 mutex_unlock(&adev->psp.mutex); 3573 3574 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr); 3575 3576 rel_buf: 3577 kvfree(adev->psp.vbflash_tmp_buf); 3578 adev->psp.vbflash_tmp_buf = NULL; 3579 adev->psp.vbflash_image_size = 0; 3580 3581 if (ret) { 3582 dev_err(adev->dev, "Failed to load VBIOS FW, err = %d", ret); 3583 return ret; 3584 } 3585 3586 dev_info(adev->dev, "VBIOS flash to PSP done"); 3587 return 0; 3588 } 3589 3590 static ssize_t amdgpu_psp_vbflash_status(struct device *dev, 3591 struct device_attribute *attr, 3592 char *buf) 3593 { 3594 struct drm_device *ddev = dev_get_drvdata(dev); 3595 struct amdgpu_device *adev = drm_to_adev(ddev); 3596 uint32_t vbflash_status; 3597 3598 vbflash_status = psp_vbflash_status(&adev->psp); 3599 if (!adev->psp.vbflash_done) 3600 vbflash_status = 0; 3601 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000)) 3602 vbflash_status = 1; 3603 3604 return sysfs_emit(buf, "0x%x\n", vbflash_status); 3605 } 3606 3607 static const struct bin_attribute psp_vbflash_bin_attr = { 3608 .attr = {.name = "psp_vbflash", .mode = 0664}, 3609 .size = 0, 3610 .write = amdgpu_psp_vbflash_write, 3611 .read = amdgpu_psp_vbflash_read, 3612 }; 3613 3614 static DEVICE_ATTR(psp_vbflash_status, 0444, amdgpu_psp_vbflash_status, NULL); 3615 3616 int amdgpu_psp_sysfs_init(struct amdgpu_device *adev) 3617 { 3618 int ret = 0; 3619 struct psp_context *psp = &adev->psp; 3620 3621 if (amdgpu_sriov_vf(adev)) 3622 return -EINVAL; 3623 3624 switch (adev->ip_versions[MP0_HWIP][0]) { 3625 case IP_VERSION(13, 0, 0): 3626 case IP_VERSION(13, 0, 7): 3627 if (!psp->adev) { 3628 psp->adev = adev; 3629 psp_v13_0_set_psp_funcs(psp); 3630 } 3631 ret = sysfs_create_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); 3632 if (ret) 3633 dev_err(adev->dev, "Failed to create device file psp_vbflash"); 3634 ret = device_create_file(adev->dev, &dev_attr_psp_vbflash_status); 3635 if (ret) 3636 dev_err(adev->dev, "Failed to create device file psp_vbflash_status"); 3637 return ret; 3638 default: 3639 return 0; 3640 } 3641 } 3642 3643 const struct amd_ip_funcs psp_ip_funcs = { 3644 .name = "psp", 3645 .early_init = psp_early_init, 3646 .late_init = NULL, 3647 .sw_init = psp_sw_init, 3648 .sw_fini = psp_sw_fini, 3649 .hw_init = psp_hw_init, 3650 .hw_fini = psp_hw_fini, 3651 .suspend = psp_suspend, 3652 .resume = psp_resume, 3653 .is_idle = NULL, 3654 .check_soft_reset = NULL, 3655 .wait_for_idle = NULL, 3656 .soft_reset = NULL, 3657 .set_clockgating_state = psp_set_clockgating_state, 3658 .set_powergating_state = psp_set_powergating_state, 3659 }; 3660 3661 static int psp_sysfs_init(struct amdgpu_device *adev) 3662 { 3663 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 3664 3665 if (ret) 3666 DRM_ERROR("Failed to create USBC PD FW control file!"); 3667 3668 return ret; 3669 } 3670 3671 void amdgpu_psp_sysfs_fini(struct amdgpu_device *adev) 3672 { 3673 sysfs_remove_bin_file(&adev->dev->kobj, &psp_vbflash_bin_attr); 3674 device_remove_file(adev->dev, &dev_attr_psp_vbflash_status); 3675 } 3676 3677 static void psp_sysfs_fini(struct amdgpu_device *adev) 3678 { 3679 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 3680 } 3681 3682 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 3683 { 3684 .type = AMD_IP_BLOCK_TYPE_PSP, 3685 .major = 3, 3686 .minor = 1, 3687 .rev = 0, 3688 .funcs = &psp_ip_funcs, 3689 }; 3690 3691 const struct amdgpu_ip_block_version psp_v10_0_ip_block = 3692 { 3693 .type = AMD_IP_BLOCK_TYPE_PSP, 3694 .major = 10, 3695 .minor = 0, 3696 .rev = 0, 3697 .funcs = &psp_ip_funcs, 3698 }; 3699 3700 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 3701 { 3702 .type = AMD_IP_BLOCK_TYPE_PSP, 3703 .major = 11, 3704 .minor = 0, 3705 .rev = 0, 3706 .funcs = &psp_ip_funcs, 3707 }; 3708 3709 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = { 3710 .type = AMD_IP_BLOCK_TYPE_PSP, 3711 .major = 11, 3712 .minor = 0, 3713 .rev = 8, 3714 .funcs = &psp_ip_funcs, 3715 }; 3716 3717 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 3718 { 3719 .type = AMD_IP_BLOCK_TYPE_PSP, 3720 .major = 12, 3721 .minor = 0, 3722 .rev = 0, 3723 .funcs = &psp_ip_funcs, 3724 }; 3725 3726 const struct amdgpu_ip_block_version psp_v13_0_ip_block = { 3727 .type = AMD_IP_BLOCK_TYPE_PSP, 3728 .major = 13, 3729 .minor = 0, 3730 .rev = 0, 3731 .funcs = &psp_ip_funcs, 3732 }; 3733 3734 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = { 3735 .type = AMD_IP_BLOCK_TYPE_PSP, 3736 .major = 13, 3737 .minor = 0, 3738 .rev = 4, 3739 .funcs = &psp_ip_funcs, 3740 }; 3741