1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41
42 #include "amdgpu_ras.h"
43 #include "amdgpu_securedisplay.h"
44 #include "amdgpu_atomfirmware.h"
45
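/* 16 MiB (1024 * 1024 * 16 bytes) upper bound used when reading a VBIOS image */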
46 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
47
48 static int psp_load_smu_fw(struct psp_context *psp);
49 static int psp_rap_terminate(struct psp_context *psp);
50 static int psp_securedisplay_terminate(struct psp_context *psp);
51
52 static int psp_ring_init(struct psp_context *psp,
53 enum psp_ring_type ring_type)
54 {
55 int ret = 0;
56 struct psp_ring *ring;
57 struct amdgpu_device *adev = psp->adev;
58
59 ring = &psp->km_ring;
60
61 ring->ring_type = ring_type;
62
63 /* allocate 4k Page of Local Frame Buffer memory for ring */
64 ring->ring_size = 0x1000;
65 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
66 AMDGPU_GEM_DOMAIN_VRAM |
67 AMDGPU_GEM_DOMAIN_GTT,
68 &adev->firmware.rbuf,
69 &ring->ring_mem_mc_addr,
70 (void **)&ring->ring_mem);
71 if (ret) {
72 ring->ring_size = 0;
73 return ret;
74 }
75
76 return 0;
77 }
78
79 /*
80 * Due to DF Cstate management being centralized in the PMFW, the firmware
81 * loading sequence is updated as below:
82 * - Load KDB
83 * - Load SYS_DRV
84 * - Load tOS
85 * - Load PMFW
86 * - Setup TMR
87 * - Load other non-psp fw
88 * - Load ASD
89 * - Load XGMI/RAS/HDCP/DTM TA if any
90 *
91 * This new sequence is required for
92 * - Arcturus and onwards
93 */
94 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
95 {
96 struct amdgpu_device *adev = psp->adev;
97
98 if (amdgpu_sriov_vf(adev)) {
99 psp->pmfw_centralized_cstate_management = false;
100 return;
101 }
102
103 switch (adev->ip_versions[MP0_HWIP][0]) {
104 case IP_VERSION(11, 0, 0):
105 case IP_VERSION(11, 0, 4):
106 case IP_VERSION(11, 0, 5):
107 case IP_VERSION(11, 0, 7):
108 case IP_VERSION(11, 0, 9):
109 case IP_VERSION(11, 0, 11):
110 case IP_VERSION(11, 0, 12):
111 case IP_VERSION(11, 0, 13):
112 case IP_VERSION(13, 0, 0):
113 case IP_VERSION(13, 0, 2):
114 case IP_VERSION(13, 0, 7):
115 psp->pmfw_centralized_cstate_management = true;
116 break;
117 default:
118 psp->pmfw_centralized_cstate_management = false;
119 break;
120 }
121 }
122
123 static int psp_init_sriov_microcode(struct psp_context *psp)
124 {
125 struct amdgpu_device *adev = psp->adev;
126 char ucode_prefix[30];
127 int ret = 0;
128
129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
130
131 switch (adev->ip_versions[MP0_HWIP][0]) {
132 case IP_VERSION(9, 0, 0):
133 case IP_VERSION(11, 0, 7):
134 case IP_VERSION(11, 0, 9):
135 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
136 ret = psp_init_cap_microcode(psp, ucode_prefix);
137 break;
138 case IP_VERSION(13, 0, 2):
139 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
140 ret = psp_init_cap_microcode(psp, ucode_prefix);
141 ret &= psp_init_ta_microcode(psp, ucode_prefix);
142 break;
143 case IP_VERSION(13, 0, 0):
144 adev->virt.autoload_ucode_id = 0;
145 break;
146 case IP_VERSION(13, 0, 6):
147 ret = psp_init_cap_microcode(psp, ucode_prefix);
148 ret &= psp_init_ta_microcode(psp, ucode_prefix);
149 break;
150 case IP_VERSION(13, 0, 10):
151 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
152 ret = psp_init_cap_microcode(psp, ucode_prefix);
153 break;
154 default:
155 return -EINVAL;
156 }
157 return ret;
158 }
159
160 static int psp_early_init(void *handle)
161 {
162 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
163 struct psp_context *psp = &adev->psp;
164
165 switch (adev->ip_versions[MP0_HWIP][0]) {
166 case IP_VERSION(9, 0, 0):
167 psp_v3_1_set_psp_funcs(psp);
168 psp->autoload_supported = false;
169 break;
170 case IP_VERSION(10, 0, 0):
171 case IP_VERSION(10, 0, 1):
172 psp_v10_0_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 break;
175 case IP_VERSION(11, 0, 2):
176 case IP_VERSION(11, 0, 4):
177 psp_v11_0_set_psp_funcs(psp);
178 psp->autoload_supported = false;
179 break;
180 case IP_VERSION(11, 0, 0):
181 case IP_VERSION(11, 0, 7):
182 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
183 fallthrough;
184 case IP_VERSION(11, 0, 5):
185 case IP_VERSION(11, 0, 9):
186 case IP_VERSION(11, 0, 11):
187 case IP_VERSION(11, 5, 0):
188 case IP_VERSION(11, 0, 12):
189 case IP_VERSION(11, 0, 13):
190 psp_v11_0_set_psp_funcs(psp);
191 psp->autoload_supported = true;
192 break;
193 case IP_VERSION(11, 0, 3):
194 case IP_VERSION(12, 0, 1):
195 psp_v12_0_set_psp_funcs(psp);
196 break;
197 case IP_VERSION(13, 0, 2):
198 case IP_VERSION(13, 0, 6):
199 psp_v13_0_set_psp_funcs(psp);
200 break;
201 case IP_VERSION(13, 0, 1):
202 case IP_VERSION(13, 0, 3):
203 case IP_VERSION(13, 0, 5):
204 case IP_VERSION(13, 0, 8):
205 case IP_VERSION(13, 0, 11):
206 case IP_VERSION(14, 0, 0):
207 psp_v13_0_set_psp_funcs(psp);
208 psp->autoload_supported = true;
209 break;
210 case IP_VERSION(11, 0, 8):
211 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
212 psp_v11_0_8_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 }
215 break;
216 case IP_VERSION(13, 0, 0):
217 case IP_VERSION(13, 0, 7):
218 case IP_VERSION(13, 0, 10):
219 psp_v13_0_set_psp_funcs(psp);
220 psp->autoload_supported = true;
221 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
222 break;
223 case IP_VERSION(13, 0, 4):
224 psp_v13_0_4_set_psp_funcs(psp);
225 psp->autoload_supported = true;
226 break;
227 default:
228 return -EINVAL;
229 }
230
231 psp->adev = adev;
232
233 psp_check_pmfw_centralized_cstate_management(psp);
234
235 if (amdgpu_sriov_vf(adev))
236 return psp_init_sriov_microcode(psp);
237 else
238 return psp_init_microcode(psp);
239 }
240
241 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
242 {
243 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
244 &mem_ctx->shared_buf);
245 mem_ctx->shared_bo = NULL;
246 }
247
248 static void psp_free_shared_bufs(struct psp_context *psp)
249 {
250 void *tmr_buf;
251 void **pptr;
252
253 /* free TMR memory buffer */
254 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
255 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
256 psp->tmr_bo = NULL;
257
258 /* free xgmi shared memory */
259 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
260
261 /* free ras shared memory */
262 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
263
264 /* free hdcp shared memory */
265 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
266
267 /* free dtm shared memory */
268 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
269
270 /* free rap shared memory */
271 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
272
273 /* free securedisplay shared memory */
274 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
275
276
277 }
278
279 static void psp_memory_training_fini(struct psp_context *psp)
280 {
281 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
282
283 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
284 kfree(ctx->sys_cache);
285 ctx->sys_cache = NULL;
286 }
287
288 static int psp_memory_training_init(struct psp_context *psp)
289 {
290 int ret;
291 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
292
293 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
294 DRM_DEBUG("memory training is not supported!\n");
295 return 0;
296 }
297
298 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
299 if (ctx->sys_cache == NULL) {
300 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
301 ret = -ENOMEM;
302 goto Err_out;
303 }
304
305 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
306 ctx->train_data_size,
307 ctx->p2c_train_data_offset,
308 ctx->c2p_train_data_offset);
309 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
310 return 0;
311
312 Err_out:
313 psp_memory_training_fini(psp);
314 return ret;
315 }
316
317 /*
318 * Helper function to query a PSP runtime database entry
319 *
320 * @adev: amdgpu_device pointer
321 * @entry_type: the type of psp runtime database entry
322 * @db_entry: runtime database entry pointer
323 *
324 * Return false if the runtime database doesn't exist or the entry is invalid,
325 * or true if the specific database entry is found and copied to @db_entry
326 */
327 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
328 enum psp_runtime_entry_type entry_type,
329 void *db_entry)
330 {
331 uint64_t db_header_pos, db_dir_pos;
332 struct psp_runtime_data_header db_header = {0};
333 struct psp_runtime_data_directory db_dir = {0};
334 bool ret = false;
335 int i;
336
337 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))
338 return false;
339
340 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
341 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
342
343 /* read runtime db header from vram */
344 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
345 sizeof(struct psp_runtime_data_header), false);
346
347 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
348 /* runtime db doesn't exist, exit */
349 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
350 return false;
351 }
352
353 /* read runtime database entry from vram */
354 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
355 sizeof(struct psp_runtime_data_directory), false);
356
357 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
358 /* invalid db entry count, exit */
359 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
360 return false;
361 }
362
363 /* look up for requested entry type */
364 for (i = 0; i < db_dir.entry_count && !ret; i++) {
365 if (db_dir.entry_list[i].entry_type == entry_type) {
366 switch (entry_type) {
367 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
368 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
369 /* invalid db entry size */
370 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
371 return false;
372 }
373 /* read runtime database entry */
374 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
375 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
376 ret = true;
377 break;
378 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
379 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
380 /* invalid db entry size */
381 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
382 return false;
383 }
384 /* read runtime database entry */
385 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
386 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
387 ret = true;
388 break;
389 default:
390 ret = false;
391 break;
392 }
393 }
394 }
395
396 return ret;
397 }
398
399 static int psp_sw_init(void *handle)
400 {
401 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
402 struct psp_context *psp = &adev->psp;
403 int ret;
404 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
405 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
406 struct psp_runtime_scpm_entry scpm_entry;
407
408 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
409 if (!psp->cmd) {
410 DRM_ERROR("Failed to allocate memory for command buffer!\n");
411 return -ENOMEM;
412 }
413
414 adev->psp.xgmi_context.supports_extended_data =
415 !adev->gmc.xgmi.connected_to_cpu &&
416 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
417
418 memset(&scpm_entry, 0, sizeof(scpm_entry));
419 if ((psp_get_runtime_db_entry(adev,
420 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
421 &scpm_entry)) &&
422 (scpm_entry.scpm_status != SCPM_DISABLE)) {
423 adev->scpm_enabled = true;
424 adev->scpm_status = scpm_entry.scpm_status;
425 } else {
426 adev->scpm_enabled = false;
427 adev->scpm_status = SCPM_DISABLE;
428 }
429
430 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
431
432 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
433 if (psp_get_runtime_db_entry(adev,
434 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
435 &boot_cfg_entry)) {
436 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
437 if ((psp->boot_cfg_bitmask) &
438 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
439 /* If psp runtime database exists, then
440 * only enable two stage memory training
441 * when TWO_STAGE_DRAM_TRAINING bit is set
442 * in runtime database
443 */
444 mem_training_ctx->enable_mem_training = true;
445 }
446
447 } else {
448 /* If psp runtime database doesn't exist or is
449 * invalid, force enable two stage memory training
450 */
451 mem_training_ctx->enable_mem_training = true;
452 }
453
454 if (mem_training_ctx->enable_mem_training) {
455 ret = psp_memory_training_init(psp);
456 if (ret) {
457 DRM_ERROR("Failed to initialize memory training!\n");
458 return ret;
459 }
460
461 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
462 if (ret) {
463 DRM_ERROR("Failed to process memory training!\n");
464 return ret;
465 }
466 }
467
468 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
469 amdgpu_sriov_vf(adev) ?
470 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
471 &psp->fw_pri_bo,
472 &psp->fw_pri_mc_addr,
473 &psp->fw_pri_buf);
474 if (ret)
475 return ret;
476
477 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
478 AMDGPU_GEM_DOMAIN_VRAM |
479 AMDGPU_GEM_DOMAIN_GTT,
480 &psp->fence_buf_bo,
481 &psp->fence_buf_mc_addr,
482 &psp->fence_buf);
483 if (ret)
484 goto failed1;
485
486 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
487 AMDGPU_GEM_DOMAIN_VRAM |
488 AMDGPU_GEM_DOMAIN_GTT,
489 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
490 (void **)&psp->cmd_buf_mem);
491 if (ret)
492 goto failed2;
493
494 return 0;
495
496 failed2:
497 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
498 &psp->fence_buf_mc_addr, &psp->fence_buf);
499 failed1:
500 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
501 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
502 return ret;
503 }
504
505 static int psp_sw_fini(void *handle)
506 {
507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
508 struct psp_context *psp = &adev->psp;
509
510 psp_memory_training_fini(psp);
511
512 amdgpu_ucode_release(&psp->sos_fw);
513 amdgpu_ucode_release(&psp->asd_fw);
514 amdgpu_ucode_release(&psp->ta_fw);
515 amdgpu_ucode_release(&psp->cap_fw);
516 amdgpu_ucode_release(&psp->toc_fw);
517
518 kfree(psp->cmd);
519 psp->cmd = NULL;
520
521 psp_free_shared_bufs(psp);
522
523 if (psp->km_ring.ring_mem)
524 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
525 &psp->km_ring.ring_mem_mc_addr,
526 (void **)&psp->km_ring.ring_mem);
527
528 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
529 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
530 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
531 &psp->fence_buf_mc_addr, &psp->fence_buf);
532 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
533 (void **)&psp->cmd_buf_mem);
534
535 return 0;
536 }
537
538 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
539 uint32_t reg_val, uint32_t mask, bool check_changed)
540 {
541 uint32_t val;
542 int i;
543 struct amdgpu_device *adev = psp->adev;
544
545 if (psp->adev->no_hw_access)
546 return 0;
547
548 for (i = 0; i < adev->usec_timeout; i++) {
549 val = RREG32(reg_index);
550 if (check_changed) {
551 if (val != reg_val)
552 return 0;
553 } else {
554 if ((val & mask) == reg_val)
555 return 0;
556 }
557 udelay(1);
558 }
559
560 return -ETIME;
561 }
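/*
 * Illustrative use of the polling helper above (not a call taken from this
 * file): wait for the PSP to raise a "ready" bit in a mailbox register, e.g.
 *
 *   ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
 *                      0x80000000, 0x80000000, false);
 *
 * The register name is only an example; each psp_v*.c backend passes its own
 * mailbox offset, expected value and mask.
 */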
562
563 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
564 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
565 {
566 uint32_t val;
567 int i;
568 struct amdgpu_device *adev = psp->adev;
569
570 if (psp->adev->no_hw_access)
571 return 0;
572
573 for (i = 0; i < msec_timeout; i++) {
574 val = RREG32(reg_index);
575 if ((val & mask) == reg_val)
576 return 0;
577 msleep(1);
578 }
579
580 return -ETIME;
581 }
582
583 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
584 {
585 switch (cmd_id) {
586 case GFX_CMD_ID_LOAD_TA:
587 return "LOAD_TA";
588 case GFX_CMD_ID_UNLOAD_TA:
589 return "UNLOAD_TA";
590 case GFX_CMD_ID_INVOKE_CMD:
591 return "INVOKE_CMD";
592 case GFX_CMD_ID_LOAD_ASD:
593 return "LOAD_ASD";
594 case GFX_CMD_ID_SETUP_TMR:
595 return "SETUP_TMR";
596 case GFX_CMD_ID_LOAD_IP_FW:
597 return "LOAD_IP_FW";
598 case GFX_CMD_ID_DESTROY_TMR:
599 return "DESTROY_TMR";
600 case GFX_CMD_ID_SAVE_RESTORE:
601 return "SAVE_RESTORE_IP_FW";
602 case GFX_CMD_ID_SETUP_VMR:
603 return "SETUP_VMR";
604 case GFX_CMD_ID_DESTROY_VMR:
605 return "DESTROY_VMR";
606 case GFX_CMD_ID_PROG_REG:
607 return "PROG_REG";
608 case GFX_CMD_ID_GET_FW_ATTESTATION:
609 return "GET_FW_ATTESTATION";
610 case GFX_CMD_ID_LOAD_TOC:
611 return "ID_LOAD_TOC";
612 case GFX_CMD_ID_AUTOLOAD_RLC:
613 return "AUTOLOAD_RLC";
614 case GFX_CMD_ID_BOOT_CFG:
615 return "BOOT_CFG";
616 default:
617 return "UNKNOWN CMD";
618 }
619 }
620
621 static int
622 psp_cmd_submit_buf(struct psp_context *psp,
623 struct amdgpu_firmware_info *ucode,
624 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
625 {
626 int ret;
627 int index;
628 int timeout = 20000;
629 bool ras_intr = false;
630 bool skip_unsupport = false;
631
632 if (psp->adev->no_hw_access)
633 return 0;
634
635 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
636
637 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
638
639 index = atomic_inc_return(&psp->fence_value);
640 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
641 if (ret) {
642 atomic_dec(&psp->fence_value);
643 goto exit;
644 }
645
646 amdgpu_device_invalidate_hdp(psp->adev, NULL);
647 while (*((unsigned int *)psp->fence_buf) != index) {
648 if (--timeout == 0)
649 break;
650 /*
651 * Don't wait for the timeout when err_event_athub occurs,
652 * because the gpu reset thread has been triggered and the lock
653 * resource should be released for the psp resume sequence.
654 */
655 ras_intr = amdgpu_ras_intr_triggered();
656 if (ras_intr)
657 break;
658 usleep_range(10, 100);
659 amdgpu_device_invalidate_hdp(psp->adev, NULL);
660 }
661
662 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
663 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
664 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
665
666 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
667
668 /* In some cases, the psp response status is not 0 even though there is no
669 * problem while the command is submitted. Some versions of PSP FW
670 * don't write 0 to that field.
671 * So here we only print a warning instead of an error
672 * during psp initialization to avoid breaking hw_init, and we don't
673 * return -EINVAL.
674 */
675 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
676 if (ucode)
677 DRM_WARN("failed to load ucode %s(0x%X) ",
678 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
679 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
680 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
681 psp->cmd_buf_mem->resp.status);
682 /* If any firmware (including CAP) load fails under SRIOV, it should
683 * return failure to stop the VF from initializing.
684 * Also return failure in case of timeout
685 */
686 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
687 ret = -EINVAL;
688 goto exit;
689 }
690 }
691
692 if (ucode) {
693 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
694 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
695 }
696
697 exit:
698 return ret;
699 }
700
701 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
702 {
703 struct psp_gfx_cmd_resp *cmd = psp->cmd;
704
705 mutex_lock(&psp->mutex);
706
707 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
708
709 return cmd;
710 }
711
712 static void release_psp_cmd_buf(struct psp_context *psp)
713 {
714 mutex_unlock(&psp->mutex);
715 }
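/*
 * The helpers above define the command submission pattern used throughout
 * this file (sketch only):
 *
 *   cmd = acquire_psp_cmd_buf(psp);      // takes psp->mutex, zeroes the buffer
 *   cmd->cmd_id = GFX_CMD_ID_...;        // fill in the GFX command
 *   ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 *   release_psp_cmd_buf(psp);            // drops psp->mutex
 */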
716
717 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
718 struct psp_gfx_cmd_resp *cmd,
719 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
720 {
721 struct amdgpu_device *adev = psp->adev;
722 uint32_t size = 0;
723 uint64_t tmr_pa = 0;
724
725 if (tmr_bo) {
726 size = amdgpu_bo_size(tmr_bo);
727 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
728 }
729
730 if (amdgpu_sriov_vf(psp->adev))
731 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
732 else
733 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
734 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
735 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
736 cmd->cmd.cmd_setup_tmr.buf_size = size;
737 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
738 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
739 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
740 }
741
742 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
743 uint64_t pri_buf_mc, uint32_t size)
744 {
745 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
746 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
747 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
748 cmd->cmd.cmd_load_toc.toc_size = size;
749 }
750
751 /* Issue LOAD TOC cmd to PSP to parse the toc and calculate the tmr size needed */
752 static int psp_load_toc(struct psp_context *psp,
753 uint32_t *tmr_size)
754 {
755 int ret;
756 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
757
758 /* Copy toc to psp firmware private buffer */
759 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
760
761 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
762
763 ret = psp_cmd_submit_buf(psp, NULL, cmd,
764 psp->fence_buf_mc_addr);
765 if (!ret)
766 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
767
768 release_psp_cmd_buf(psp);
769
770 return ret;
771 }
772
773 static bool psp_boottime_tmr(struct psp_context *psp)
774 {
775 switch (psp->adev->ip_versions[MP0_HWIP][0]) {
776 case IP_VERSION(13, 0, 6):
777 return true;
778 default:
779 return false;
780 }
781 }
782
783 /* Set up Trusted Memory Region */
784 static int psp_tmr_init(struct psp_context *psp)
785 {
786 int ret = 0;
787 int tmr_size;
788 void *tmr_buf;
789 void **pptr;
790
791 /*
792 * According to the HW engineers, the TMR address should be "naturally
793 * aligned", i.e. the start address should be an integer multiple of the TMR size.
794 *
795 * Note: this memory must remain reserved until the driver is
796 * unloaded.
797 */
798 tmr_size = PSP_TMR_SIZE(psp->adev);
799
800 /* For ASICs that support RLC autoload, psp will parse the toc
801 * and calculate the total size of TMR needed
802 */
803 if (!amdgpu_sriov_vf(psp->adev) &&
804 psp->toc.start_addr &&
805 psp->toc.size_bytes &&
806 psp->fw_pri_buf) {
807 ret = psp_load_toc(psp, &tmr_size);
808 if (ret) {
809 DRM_ERROR("Failed to load toc\n");
810 return ret;
811 }
812 }
813
814 if (!psp->tmr_bo) {
815 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
816 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
817 PSP_TMR_ALIGNMENT,
818 AMDGPU_HAS_VRAM(psp->adev) ?
819 AMDGPU_GEM_DOMAIN_VRAM :
820 AMDGPU_GEM_DOMAIN_GTT,
821 &psp->tmr_bo, &psp->tmr_mc_addr,
822 pptr);
823 }
824
825 return ret;
826 }
827
828 static bool psp_skip_tmr(struct psp_context *psp)
829 {
830 switch (psp->adev->ip_versions[MP0_HWIP][0]) {
831 case IP_VERSION(11, 0, 9):
832 case IP_VERSION(11, 0, 7):
833 case IP_VERSION(13, 0, 2):
834 case IP_VERSION(13, 0, 6):
835 case IP_VERSION(13, 0, 10):
836 return true;
837 default:
838 return false;
839 }
840 }
841
842 static int psp_tmr_load(struct psp_context *psp)
843 {
844 int ret;
845 struct psp_gfx_cmd_resp *cmd;
846
847 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
848 * Already set up by host driver.
849 */
850 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
851 return 0;
852
853 cmd = acquire_psp_cmd_buf(psp);
854
855 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
856 if (psp->tmr_bo)
857 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
858 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
859
860 ret = psp_cmd_submit_buf(psp, NULL, cmd,
861 psp->fence_buf_mc_addr);
862
863 release_psp_cmd_buf(psp);
864
865 return ret;
866 }
867
868 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
869 struct psp_gfx_cmd_resp *cmd)
870 {
871 if (amdgpu_sriov_vf(psp->adev))
872 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
873 else
874 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
875 }
876
877 static int psp_tmr_unload(struct psp_context *psp)
878 {
879 int ret;
880 struct psp_gfx_cmd_resp *cmd;
881
882 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
883 * as TMR is not loaded at all
884 */
885 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
886 return 0;
887
888 cmd = acquire_psp_cmd_buf(psp);
889
890 psp_prep_tmr_unload_cmd_buf(psp, cmd);
891 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
892
893 ret = psp_cmd_submit_buf(psp, NULL, cmd,
894 psp->fence_buf_mc_addr);
895
896 release_psp_cmd_buf(psp);
897
898 return ret;
899 }
900
901 static int psp_tmr_terminate(struct psp_context *psp)
902 {
903 return psp_tmr_unload(psp);
904 }
905
906 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
907 uint64_t *output_ptr)
908 {
909 int ret;
910 struct psp_gfx_cmd_resp *cmd;
911
912 if (!output_ptr)
913 return -EINVAL;
914
915 if (amdgpu_sriov_vf(psp->adev))
916 return 0;
917
918 cmd = acquire_psp_cmd_buf(psp);
919
920 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
921
922 ret = psp_cmd_submit_buf(psp, NULL, cmd,
923 psp->fence_buf_mc_addr);
924
925 if (!ret) {
926 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
927 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
928 }
929
930 release_psp_cmd_buf(psp);
931
932 return ret;
933 }
934
935 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
936 {
937 struct psp_context *psp = &adev->psp;
938 struct psp_gfx_cmd_resp *cmd;
939 int ret;
940
941 if (amdgpu_sriov_vf(adev))
942 return 0;
943
944 cmd = acquire_psp_cmd_buf(psp);
945
946 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
947 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
948
949 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
950 if (!ret) {
951 *boot_cfg =
952 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
953 }
954
955 release_psp_cmd_buf(psp);
956
957 return ret;
958 }
959
960 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
961 {
962 int ret;
963 struct psp_context *psp = &adev->psp;
964 struct psp_gfx_cmd_resp *cmd;
965
966 if (amdgpu_sriov_vf(adev))
967 return 0;
968
969 cmd = acquire_psp_cmd_buf(psp);
970
971 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
972 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
973 cmd->cmd.boot_cfg.boot_config = boot_cfg;
974 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
975
976 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
977
978 release_psp_cmd_buf(psp);
979
980 return ret;
981 }
982
983 static int psp_rl_load(struct amdgpu_device *adev)
984 {
985 int ret;
986 struct psp_context *psp = &adev->psp;
987 struct psp_gfx_cmd_resp *cmd;
988
989 if (!is_psp_fw_valid(psp->rl))
990 return 0;
991
992 cmd = acquire_psp_cmd_buf(psp);
993
994 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
995 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
996
997 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
998 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
999 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1000 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1001 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1002
1003 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1004
1005 release_psp_cmd_buf(psp);
1006
1007 return ret;
1008 }
1009
1010 int psp_spatial_partition(struct psp_context *psp, int mode)
1011 {
1012 struct psp_gfx_cmd_resp *cmd;
1013 int ret;
1014
1015 if (amdgpu_sriov_vf(psp->adev))
1016 return 0;
1017
1018 cmd = acquire_psp_cmd_buf(psp);
1019
1020 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1021 cmd->cmd.cmd_spatial_part.mode = mode;
1022
1023 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1024 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1025
1026 release_psp_cmd_buf(psp);
1027
1028 return ret;
1029 }
1030
1031 static int psp_asd_initialize(struct psp_context *psp)
1032 {
1033 int ret;
1034
1035 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1036 * Add a workaround to bypass it for sriov for now.
1037 * TODO: add a version check to make it common
1038 */
1039 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1040 return 0;
1041
1042 psp->asd_context.mem_context.shared_mc_addr = 0;
1043 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1044 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1045
1046 ret = psp_ta_load(psp, &psp->asd_context);
1047 if (!ret)
1048 psp->asd_context.initialized = true;
1049
1050 return ret;
1051 }
1052
1053 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1054 uint32_t session_id)
1055 {
1056 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1057 cmd->cmd.cmd_unload_ta.session_id = session_id;
1058 }
1059
1060 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1061 {
1062 int ret;
1063 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1064
1065 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1066
1067 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1068
1069 context->resp_status = cmd->resp.status;
1070
1071 release_psp_cmd_buf(psp);
1072
1073 return ret;
1074 }
1075
1076 static int psp_asd_terminate(struct psp_context *psp)
1077 {
1078 int ret;
1079
1080 if (amdgpu_sriov_vf(psp->adev))
1081 return 0;
1082
1083 if (!psp->asd_context.initialized)
1084 return 0;
1085
1086 ret = psp_ta_unload(psp, &psp->asd_context);
1087 if (!ret)
1088 psp->asd_context.initialized = false;
1089
1090 return ret;
1091 }
1092
1093 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1094 uint32_t id, uint32_t value)
1095 {
1096 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1097 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1098 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1099 }
1100
1101 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1102 uint32_t value)
1103 {
1104 struct psp_gfx_cmd_resp *cmd;
1105 int ret = 0;
1106
1107 if (reg >= PSP_REG_LAST)
1108 return -EINVAL;
1109
1110 cmd = acquire_psp_cmd_buf(psp);
1111
1112 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1113 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1114 if (ret)
1115 DRM_ERROR("PSP failed to program reg id %d", reg);
1116
1117 release_psp_cmd_buf(psp);
1118
1119 return ret;
1120 }
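/*
 * Illustrative call only (the register ids come from enum psp_reg_prog_id in
 * amdgpu_psp.h; the value shown is a placeholder):
 *
 *   psp_reg_program(psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl_val);
 */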
1121
1122 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1123 uint64_t ta_bin_mc,
1124 struct ta_context *context)
1125 {
1126 cmd->cmd_id = context->ta_load_type;
1127 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1128 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1129 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1130
1131 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1132 lower_32_bits(context->mem_context.shared_mc_addr);
1133 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1134 upper_32_bits(context->mem_context.shared_mc_addr);
1135 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1136 }
1137
1138 int psp_ta_init_shared_buf(struct psp_context *psp,
1139 struct ta_mem_context *mem_ctx)
1140 {
1141 /*
1142 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1143 * physical memory) for the TA <-> host shared memory
1144 */
1145 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1146 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1147 AMDGPU_GEM_DOMAIN_GTT,
1148 &mem_ctx->shared_bo,
1149 &mem_ctx->shared_mc_addr,
1150 &mem_ctx->shared_buf);
1151 }
1152
1153 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1154 uint32_t ta_cmd_id,
1155 uint32_t session_id)
1156 {
1157 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1158 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1159 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1160 }
1161
1162 int psp_ta_invoke(struct psp_context *psp,
1163 uint32_t ta_cmd_id,
1164 struct ta_context *context)
1165 {
1166 int ret;
1167 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1168
1169 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1170
1171 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1172 psp->fence_buf_mc_addr);
1173
1174 context->resp_status = cmd->resp.status;
1175
1176 release_psp_cmd_buf(psp);
1177
1178 return ret;
1179 }
1180
1181 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1182 {
1183 int ret;
1184 struct psp_gfx_cmd_resp *cmd;
1185
1186 cmd = acquire_psp_cmd_buf(psp);
1187
1188 psp_copy_fw(psp, context->bin_desc.start_addr,
1189 context->bin_desc.size_bytes);
1190
1191 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1192
1193 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1194 psp->fence_buf_mc_addr);
1195
1196 context->resp_status = cmd->resp.status;
1197
1198 if (!ret)
1199 context->session_id = cmd->resp.session_id;
1200
1201 release_psp_cmd_buf(psp);
1202
1203 return ret;
1204 }
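/*
 * Typical TA lifecycle built from the helpers above (sketch only, mirroring
 * what psp_xgmi_initialize()/psp_xgmi_terminate() below do; "ctx" stands for
 * any struct ta_context embedded in psp):
 *
 *   ctx->mem_context.shared_mem_size = ...;           // per-TA shared buffer size
 *   psp_ta_init_shared_buf(psp, &ctx->mem_context);   // TA <-> host shared buffer
 *   psp_ta_load(psp, ctx);                            // returns a session_id
 *   psp_ta_invoke(psp, ta_cmd_id, ctx);               // issue TA commands
 *   psp_ta_unload(psp, ctx);                          // tear the session down
 *   psp_ta_free_shared_buf(&ctx->mem_context);
 */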
1205
1206 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1207 {
1208 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1209 }
1210
1211 int psp_xgmi_terminate(struct psp_context *psp)
1212 {
1213 int ret;
1214 struct amdgpu_device *adev = psp->adev;
1215
1216 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1217 if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
1218 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
1219 adev->gmc.xgmi.connected_to_cpu))
1220 return 0;
1221
1222 if (!psp->xgmi_context.context.initialized)
1223 return 0;
1224
1225 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1226
1227 psp->xgmi_context.context.initialized = false;
1228
1229 return ret;
1230 }
1231
1232 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1233 {
1234 struct ta_xgmi_shared_memory *xgmi_cmd;
1235 int ret;
1236
1237 if (!psp->ta_fw ||
1238 !psp->xgmi_context.context.bin_desc.size_bytes ||
1239 !psp->xgmi_context.context.bin_desc.start_addr)
1240 return -ENOENT;
1241
1242 if (!load_ta)
1243 goto invoke;
1244
1245 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1246 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1247
1248 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1249 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1250 if (ret)
1251 return ret;
1252 }
1253
1254 /* Load XGMI TA */
1255 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1256 if (!ret)
1257 psp->xgmi_context.context.initialized = true;
1258 else
1259 return ret;
1260
1261 invoke:
1262 /* Initialize XGMI session */
1263 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1264 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1265 xgmi_cmd->flag_extend_link_record = set_extended_data;
1266 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1267
1268 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1269
1270 return ret;
1271 }
1272
1273 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1274 {
1275 struct ta_xgmi_shared_memory *xgmi_cmd;
1276 int ret;
1277
1278 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1279 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1280
1281 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1282
1283 /* Invoke xgmi ta to get hive id */
1284 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1285 if (ret)
1286 return ret;
1287
1288 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1289
1290 return 0;
1291 }
1292
1293 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1294 {
1295 struct ta_xgmi_shared_memory *xgmi_cmd;
1296 int ret;
1297
1298 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1299 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1300
1301 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1302
1303 /* Invoke xgmi ta to get the node id */
1304 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1305 if (ret)
1306 return ret;
1307
1308 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1309
1310 return 0;
1311 }
1312
1313 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1314 {
1315 return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
1316 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1317 psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6);
1318 }
1319
1320 /*
1321 * Chips that support extended topology information require the driver to
1322 * reflect topology information in the opposite direction. This is
1323 * because the TA has already exceeded its link record limit and if the
1324 * TA holds bi-directional information, the driver would have to do
1325 * multiple fetches instead of just two.
1326 */
1327 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1328 struct psp_xgmi_node_info node_info)
1329 {
1330 struct amdgpu_device *mirror_adev;
1331 struct amdgpu_hive_info *hive;
1332 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1333 uint64_t dst_node_id = node_info.node_id;
1334 uint8_t dst_num_hops = node_info.num_hops;
1335 uint8_t dst_num_links = node_info.num_links;
1336
1337 hive = amdgpu_get_xgmi_hive(psp->adev);
1338 if (WARN_ON(!hive))
1339 return;
1340
1341 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1342 struct psp_xgmi_topology_info *mirror_top_info;
1343 int j;
1344
1345 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1346 continue;
1347
1348 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1349 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1350 if (mirror_top_info->nodes[j].node_id != src_node_id)
1351 continue;
1352
1353 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1354 /*
1355 * prevent re-reflecting a zero num_links value, since the
1356 * reflection criterion is based on num_hops (direct or indirect).
1358 */
1359 if (dst_num_links)
1360 mirror_top_info->nodes[j].num_links = dst_num_links;
1361
1362 break;
1363 }
1364
1365 break;
1366 }
1367
1368 amdgpu_put_xgmi_hive(hive);
1369 }
1370
1371 int psp_xgmi_get_topology_info(struct psp_context *psp,
1372 int number_devices,
1373 struct psp_xgmi_topology_info *topology,
1374 bool get_extended_data)
1375 {
1376 struct ta_xgmi_shared_memory *xgmi_cmd;
1377 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1378 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1379 int i;
1380 int ret;
1381
1382 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1383 return -EINVAL;
1384
1385 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1386 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1387 xgmi_cmd->flag_extend_link_record = get_extended_data;
1388
1389 /* Fill in the shared memory with topology information as input */
1390 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1391 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
1392 topology_info_input->num_nodes = number_devices;
1393
1394 for (i = 0; i < topology_info_input->num_nodes; i++) {
1395 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1396 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1397 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1398 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1399 }
1400
1401 /* Invoke xgmi ta to get the topology information */
1402 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
1403 if (ret)
1404 return ret;
1405
1406 /* Read the output topology information from the shared memory */
1407 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1408 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1409 for (i = 0; i < topology->num_nodes; i++) {
1410 /* extended data will either be 0 or equal to non-extended data */
1411 if (topology_info_output->nodes[i].num_hops)
1412 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1413
1414 /* non-extended data gets everything here so no need to update */
1415 if (!get_extended_data) {
1416 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1417 topology->nodes[i].is_sharing_enabled =
1418 topology_info_output->nodes[i].is_sharing_enabled;
1419 topology->nodes[i].sdma_engine =
1420 topology_info_output->nodes[i].sdma_engine;
1421 }
1422
1423 }
1424
1425 /* Invoke xgmi ta again to get the link information */
1426 if (psp_xgmi_peer_link_info_supported(psp)) {
1427 struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
1428 bool requires_reflection =
1429 (psp->xgmi_context.supports_extended_data && get_extended_data) ||
1430 psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6);
1431
1432 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1433
1434 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS);
1435
1436 if (ret)
1437 return ret;
1438
1439 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1440 for (i = 0; i < topology->num_nodes; i++) {
1441 /* accumulate num_links on extended data */
1442 topology->nodes[i].num_links = get_extended_data ?
1443 topology->nodes[i].num_links +
1444 link_info_output->nodes[i].num_links :
1445 ((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
1446 link_info_output->nodes[i].num_links);
1447
1448 /* reflect the topology information for bi-directionality */
1449 if (requires_reflection && topology->nodes[i].num_hops)
1450 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1451 }
1452 }
1453
1454 return 0;
1455 }
1456
1457 int psp_xgmi_set_topology_info(struct psp_context *psp,
1458 int number_devices,
1459 struct psp_xgmi_topology_info *topology)
1460 {
1461 struct ta_xgmi_shared_memory *xgmi_cmd;
1462 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1463 int i;
1464
1465 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1466 return -EINVAL;
1467
1468 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1469 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1470
1471 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1472 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1473 topology_info_input->num_nodes = number_devices;
1474
1475 for (i = 0; i < topology_info_input->num_nodes; i++) {
1476 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1477 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1478 topology_info_input->nodes[i].is_sharing_enabled = 1;
1479 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1480 }
1481
1482 /* Invoke xgmi ta to set topology information */
1483 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1484 }
1485
1486 // ras begin
1487 static void psp_ras_ta_check_status(struct psp_context *psp)
1488 {
1489 struct ta_ras_shared_memory *ras_cmd =
1490 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1491
1492 switch (ras_cmd->ras_status) {
1493 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1494 dev_warn(psp->adev->dev,
1495 "RAS WARNING: cmd failed due to unsupported ip\n");
1496 break;
1497 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1498 dev_warn(psp->adev->dev,
1499 "RAS WARNING: cmd failed due to unsupported error injection\n");
1500 break;
1501 case TA_RAS_STATUS__SUCCESS:
1502 break;
1503 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1504 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1505 dev_warn(psp->adev->dev,
1506 "RAS WARNING: Inject error to critical region is not allowed\n");
1507 break;
1508 default:
1509 dev_warn(psp->adev->dev,
1510 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1511 break;
1512 }
1513 }
1514
1515 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1516 {
1517 struct ta_ras_shared_memory *ras_cmd;
1518 int ret;
1519
1520 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1521
1522 /*
1523 * TODO: bypass the loading in sriov for now
1524 */
1525 if (amdgpu_sriov_vf(psp->adev))
1526 return 0;
1527
1528 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1529
1530 if (amdgpu_ras_intr_triggered())
1531 return ret;
1532
1533 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1534 DRM_WARN("RAS: Unsupported Interface");
1535 return -EINVAL;
1536 }
1537
1538 if (!ret) {
1539 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1540 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1541
1542 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1543 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1544 dev_warn(psp->adev->dev,
1545 "RAS internal register access blocked\n");
1546
1547 psp_ras_ta_check_status(psp);
1548 }
1549
1550 return ret;
1551 }
1552
1553 int psp_ras_enable_features(struct psp_context *psp,
1554 union ta_ras_cmd_input *info, bool enable)
1555 {
1556 struct ta_ras_shared_memory *ras_cmd;
1557 int ret;
1558
1559 if (!psp->ras_context.context.initialized)
1560 return -EINVAL;
1561
1562 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1563 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1564
1565 if (enable)
1566 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1567 else
1568 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1569
1570 ras_cmd->ras_in_message = *info;
1571
1572 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1573 if (ret)
1574 return -EINVAL;
1575
1576 return 0;
1577 }
1578
1579 int psp_ras_terminate(struct psp_context *psp)
1580 {
1581 int ret;
1582
1583 /*
1584 * TODO: bypass the terminate in sriov for now
1585 */
1586 if (amdgpu_sriov_vf(psp->adev))
1587 return 0;
1588
1589 if (!psp->ras_context.context.initialized)
1590 return 0;
1591
1592 ret = psp_ta_unload(psp, &psp->ras_context.context);
1593
1594 psp->ras_context.context.initialized = false;
1595
1596 return ret;
1597 }
1598
1599 int psp_ras_initialize(struct psp_context *psp)
1600 {
1601 int ret;
1602 uint32_t boot_cfg = 0xFF;
1603 struct amdgpu_device *adev = psp->adev;
1604 struct ta_ras_shared_memory *ras_cmd;
1605
1606 /*
1607 * TODO: bypass the initialize in sriov for now
1608 */
1609 if (amdgpu_sriov_vf(adev))
1610 return 0;
1611
1612 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1613 !adev->psp.ras_context.context.bin_desc.start_addr) {
1614 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1615 return 0;
1616 }
1617
1618 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1619 /* query GECC enablement status from boot config
1620 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1621 */
1622 ret = psp_boot_config_get(adev, &boot_cfg);
1623 if (ret)
1624 dev_warn(adev->dev, "PSP get boot config failed\n");
1625
1626 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1627 if (!boot_cfg) {
1628 dev_info(adev->dev, "GECC is disabled\n");
1629 } else {
1630 /* disable GECC in the next boot cycle if ras is
1631 * disabled by the module parameters amdgpu_ras_enable
1632 * and/or amdgpu_ras_mask, or if the boot_config_get call
1633 * failed
1634 */
1635 ret = psp_boot_config_set(adev, 0);
1636 if (ret)
1637 dev_warn(adev->dev, "PSP set boot config failed\n");
1638 else
1639 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1640 }
1641 } else {
1642 if (boot_cfg == 1) {
1643 dev_info(adev->dev, "GECC is enabled\n");
1644 } else {
1645 /* enable GECC in next boot cycle if it is disabled
1646 * in boot config, or force enable GECC if failed to
1647 * get boot configuration
1648 */
1649 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1650 if (ret)
1651 dev_warn(adev->dev, "PSP set boot config failed\n");
1652 else
1653 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1654 }
1655 }
1656 }
1657
1658 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1659 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1660
1661 if (!psp->ras_context.context.mem_context.shared_buf) {
1662 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1663 if (ret)
1664 return ret;
1665 }
1666
1667 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1668 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1669
1670 if (amdgpu_ras_is_poison_mode_supported(adev))
1671 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1672 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1673 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1674 ras_cmd->ras_in_message.init_flags.xcc_mask =
1675 adev->gfx.xcc_mask;
1676 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1677
1678 ret = psp_ta_load(psp, &psp->ras_context.context);
1679
1680 if (!ret && !ras_cmd->ras_status)
1681 psp->ras_context.context.initialized = true;
1682 else {
1683 if (ras_cmd->ras_status)
1684 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1685
1686 /* failed to load the RAS TA */
1687 psp->ras_context.context.initialized = false;
1688 }
1689
1690 return ret;
1691 }
1692
1693 int psp_ras_trigger_error(struct psp_context *psp,
1694 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1695 {
1696 struct ta_ras_shared_memory *ras_cmd;
1697 struct amdgpu_device *adev = psp->adev;
1698 int ret;
1699 uint32_t dev_mask;
1700
1701 if (!psp->ras_context.context.initialized)
1702 return -EINVAL;
1703
1704 switch (info->block_id) {
1705 case TA_RAS_BLOCK__GFX:
1706 dev_mask = GET_MASK(GC, instance_mask);
1707 break;
1708 case TA_RAS_BLOCK__SDMA:
1709 dev_mask = GET_MASK(SDMA0, instance_mask);
1710 break;
1711 case TA_RAS_BLOCK__VCN:
1712 case TA_RAS_BLOCK__JPEG:
1713 dev_mask = GET_MASK(VCN, instance_mask);
1714 break;
1715 default:
1716 dev_mask = instance_mask;
1717 break;
1718 }
1719
1720 /* reuse sub_block_index for backward compatibility */
1721 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1722 dev_mask &= AMDGPU_RAS_INST_MASK;
1723 info->sub_block_index |= dev_mask;
1724
1725 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1726 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1727
1728 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1729 ras_cmd->ras_in_message.trigger_error = *info;
1730
1731 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1732 if (ret)
1733 return -EINVAL;
1734
1735 /* If err_event_athub occurs, the error injection was successful; however,
1736 * the return status from the TA is no longer reliable
1737 */
1738 if (amdgpu_ras_intr_triggered())
1739 return 0;
1740
1741 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1742 return -EACCES;
1743 else if (ras_cmd->ras_status)
1744 return -EINVAL;
1745
1746 return 0;
1747 }
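/* Illustrative caller sketch (hypothetical, not taken from this file),
 * showing only the fields handled above:
 *
 *	struct ta_ras_trigger_error_input info = {
 *		.block_id = TA_RAS_BLOCK__GFX,
 *		.sub_block_index = 0,
 *	};
 *
 *	ret = psp_ras_trigger_error(psp, &info, instance_mask);
 *
 * The helper ORs the translated instance mask into sub_block_index and
 * returns 0 on success, -EACCES if the TA denies access, or -EINVAL on
 * any other failure.
 */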
1748 // ras end
1749
1750 // HDCP start
1751 static int psp_hdcp_initialize(struct psp_context *psp)
1752 {
1753 int ret;
1754
1755 /*
1756 * TODO: bypass the initialize in sriov for now
1757 */
1758 if (amdgpu_sriov_vf(psp->adev))
1759 return 0;
1760
1761 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1762 !psp->hdcp_context.context.bin_desc.start_addr) {
1763 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1764 return 0;
1765 }
1766
1767 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1768 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1769
1770 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1771 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1772 if (ret)
1773 return ret;
1774 }
1775
1776 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1777 if (!ret) {
1778 psp->hdcp_context.context.initialized = true;
1779 mutex_init(&psp->hdcp_context.mutex);
1780 }
1781
1782 return ret;
1783 }
1784
1785 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1786 {
1787 /*
1788 * TODO: bypass the loading in sriov for now
1789 */
1790 if (amdgpu_sriov_vf(psp->adev))
1791 return 0;
1792
1793 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1794 }
1795
1796 static int psp_hdcp_terminate(struct psp_context *psp)
1797 {
1798 int ret;
1799
1800 /*
1801 * TODO: bypass the terminate in sriov for now
1802 */
1803 if (amdgpu_sriov_vf(psp->adev))
1804 return 0;
1805
1806 if (!psp->hdcp_context.context.initialized)
1807 return 0;
1808
1809 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1810
1811 psp->hdcp_context.context.initialized = false;
1812
1813 return ret;
1814 }
1815 // HDCP end
1816
1817 // DTM start
1818 static int psp_dtm_initialize(struct psp_context *psp)
1819 {
1820 int ret;
1821
1822 /*
1823 * TODO: bypass the initialize in sriov for now
1824 */
1825 if (amdgpu_sriov_vf(psp->adev))
1826 return 0;
1827
1828 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1829 !psp->dtm_context.context.bin_desc.start_addr) {
1830 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1831 return 0;
1832 }
1833
1834 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1835 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1836
1837 if (!psp->dtm_context.context.mem_context.shared_buf) {
1838 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1839 if (ret)
1840 return ret;
1841 }
1842
1843 ret = psp_ta_load(psp, &psp->dtm_context.context);
1844 if (!ret) {
1845 psp->dtm_context.context.initialized = true;
1846 mutex_init(&psp->dtm_context.mutex);
1847 }
1848
1849 return ret;
1850 }
1851
1852 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1853 {
1854 /*
1855 * TODO: bypass the loading in sriov for now
1856 */
1857 if (amdgpu_sriov_vf(psp->adev))
1858 return 0;
1859
1860 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1861 }
1862
1863 static int psp_dtm_terminate(struct psp_context *psp)
1864 {
1865 int ret;
1866
1867 /*
1868 * TODO: bypass the terminate in sriov for now
1869 */
1870 if (amdgpu_sriov_vf(psp->adev))
1871 return 0;
1872
1873 if (!psp->dtm_context.context.initialized)
1874 return 0;
1875
1876 ret = psp_ta_unload(psp, &psp->dtm_context.context);
1877
1878 psp->dtm_context.context.initialized = false;
1879
1880 return ret;
1881 }
1882 // DTM end
1883
1884 // RAP start
1885 static int psp_rap_initialize(struct psp_context *psp)
1886 {
1887 int ret;
1888 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1889
1890 /*
1891 * TODO: bypass the initialize in sriov for now
1892 */
1893 if (amdgpu_sriov_vf(psp->adev))
1894 return 0;
1895
1896 if (!psp->rap_context.context.bin_desc.size_bytes ||
1897 !psp->rap_context.context.bin_desc.start_addr) {
1898 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1899 return 0;
1900 }
1901
1902 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1903 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1904
1905 if (!psp->rap_context.context.mem_context.shared_buf) {
1906 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1907 if (ret)
1908 return ret;
1909 }
1910
1911 ret = psp_ta_load(psp, &psp->rap_context.context);
1912 if (!ret) {
1913 psp->rap_context.context.initialized = true;
1914 mutex_init(&psp->rap_context.mutex);
1915 } else
1916 return ret;
1917
1918 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
1919 if (ret || status != TA_RAP_STATUS__SUCCESS) {
1920 psp_rap_terminate(psp);
1921 /* free rap shared memory */
1922 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
1923
1924 dev_warn(psp->adev->dev, "RAP TA initialization failed (%d), status %d.\n",
1925 ret, status);
1926
1927 return ret;
1928 }
1929
1930 return 0;
1931 }
1932
1933 static int psp_rap_terminate(struct psp_context *psp)
1934 {
1935 int ret;
1936
1937 if (!psp->rap_context.context.initialized)
1938 return 0;
1939
1940 ret = psp_ta_unload(psp, &psp->rap_context.context);
1941
1942 psp->rap_context.context.initialized = false;
1943
1944 return ret;
1945 }
1946
1947 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
1948 {
1949 struct ta_rap_shared_memory *rap_cmd;
1950 int ret = 0;
1951
1952 if (!psp->rap_context.context.initialized)
1953 return 0;
1954
1955 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
1956 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
1957 return -EINVAL;
1958
1959 mutex_lock(&psp->rap_context.mutex);
1960
1961 rap_cmd = (struct ta_rap_shared_memory *)
1962 psp->rap_context.context.mem_context.shared_buf;
1963 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
1964
1965 rap_cmd->cmd_id = ta_cmd_id;
1966 rap_cmd->validation_method_id = METHOD_A;
1967
1968 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
1969 if (ret)
1970 goto out_unlock;
1971
1972 if (status)
1973 *status = rap_cmd->rap_status;
1974
1975 out_unlock:
1976 mutex_unlock(&psp->rap_context.mutex);
1977
1978 return ret;
1979 }
1980 // RAP end
1981
1982 /* securedisplay start */
1983 static int psp_securedisplay_initialize(struct psp_context *psp)
1984 {
1985 int ret;
1986 struct ta_securedisplay_cmd *securedisplay_cmd;
1987
1988 /*
1989 * TODO: bypass the initialize in sriov for now
1990 */
1991 if (amdgpu_sriov_vf(psp->adev))
1992 return 0;
1993
1994 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
1995 !psp->securedisplay_context.context.bin_desc.start_addr) {
1996 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
1997 return 0;
1998 }
1999
2000 psp->securedisplay_context.context.mem_context.shared_mem_size =
2001 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2002 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2003
2004 if (!psp->securedisplay_context.context.initialized) {
2005 ret = psp_ta_init_shared_buf(psp,
2006 &psp->securedisplay_context.context.mem_context);
2007 if (ret)
2008 return ret;
2009 }
2010
2011 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2012 if (!ret) {
2013 psp->securedisplay_context.context.initialized = true;
2014 mutex_init(&psp->securedisplay_context.mutex);
2015 } else
2016 return ret;
2017
2018 mutex_lock(&psp->securedisplay_context.mutex);
2019
2020 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2021 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2022
2023 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2024
2025 mutex_unlock(&psp->securedisplay_context.mutex);
2026
2027 if (ret) {
2028 psp_securedisplay_terminate(psp);
2029 /* free securedisplay shared memory */
2030 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2031 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialization failed.\n");
2032 return -EINVAL;
2033 }
2034
2035 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2036 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2037 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2038 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2039 /* don't try again */
2040 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2041 }
2042
2043 return 0;
2044 }
2045
2046 static int psp_securedisplay_terminate(struct psp_context *psp)
2047 {
2048 int ret;
2049
2050 /*
2051 * TODO: bypass the terminate in sriov for now
2052 */
2053 if (amdgpu_sriov_vf(psp->adev))
2054 return 0;
2055
2056 if (!psp->securedisplay_context.context.initialized)
2057 return 0;
2058
2059 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2060
2061 psp->securedisplay_context.context.initialized = false;
2062
2063 return ret;
2064 }
2065
2066 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2067 {
2068 int ret;
2069
2070 if (!psp->securedisplay_context.context.initialized)
2071 return -EINVAL;
2072
2073 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2074 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2075 return -EINVAL;
2076
2077 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2078
2079 return ret;
2080 }
2081 /* SECUREDISPLAY end */
2082
2083 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2084 {
2085 struct psp_context *psp = &adev->psp;
2086 int ret = 0;
2087
2088 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2089 ret = psp->funcs->wait_for_bootloader(psp);
2090
2091 return ret;
2092 }
2093
2094 static int psp_hw_start(struct psp_context *psp)
2095 {
2096 struct amdgpu_device *adev = psp->adev;
2097 int ret;
2098
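/* On bare metal, stage the PSP bootloader components in order before the
 * ring is created: KDB, SPL, SYS_DRV, SOC_DRV, INTF_DRV, DBG_DRV, RAS_DRV
 * and finally the secure OS (SOS). Each step is skipped when the binary is
 * absent or the ASIC does not implement the corresponding callback.
 */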
2099 if (!amdgpu_sriov_vf(adev)) {
2100 if ((is_psp_fw_valid(psp->kdb)) &&
2101 (psp->funcs->bootloader_load_kdb != NULL)) {
2102 ret = psp_bootloader_load_kdb(psp);
2103 if (ret) {
2104 DRM_ERROR("PSP load kdb failed!\n");
2105 return ret;
2106 }
2107 }
2108
2109 if ((is_psp_fw_valid(psp->spl)) &&
2110 (psp->funcs->bootloader_load_spl != NULL)) {
2111 ret = psp_bootloader_load_spl(psp);
2112 if (ret) {
2113 DRM_ERROR("PSP load spl failed!\n");
2114 return ret;
2115 }
2116 }
2117
2118 if ((is_psp_fw_valid(psp->sys)) &&
2119 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2120 ret = psp_bootloader_load_sysdrv(psp);
2121 if (ret) {
2122 DRM_ERROR("PSP load sys drv failed!\n");
2123 return ret;
2124 }
2125 }
2126
2127 if ((is_psp_fw_valid(psp->soc_drv)) &&
2128 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2129 ret = psp_bootloader_load_soc_drv(psp);
2130 if (ret) {
2131 DRM_ERROR("PSP load soc drv failed!\n");
2132 return ret;
2133 }
2134 }
2135
2136 if ((is_psp_fw_valid(psp->intf_drv)) &&
2137 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2138 ret = psp_bootloader_load_intf_drv(psp);
2139 if (ret) {
2140 DRM_ERROR("PSP load intf drv failed!\n");
2141 return ret;
2142 }
2143 }
2144
2145 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2146 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2147 ret = psp_bootloader_load_dbg_drv(psp);
2148 if (ret) {
2149 DRM_ERROR("PSP load dbg drv failed!\n");
2150 return ret;
2151 }
2152 }
2153
2154 if ((is_psp_fw_valid(psp->ras_drv)) &&
2155 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2156 ret = psp_bootloader_load_ras_drv(psp);
2157 if (ret) {
2158 DRM_ERROR("PSP load ras_drv failed!\n");
2159 return ret;
2160 }
2161 }
2162
2163 if ((is_psp_fw_valid(psp->sos)) &&
2164 (psp->funcs->bootloader_load_sos != NULL)) {
2165 ret = psp_bootloader_load_sos(psp);
2166 if (ret) {
2167 DRM_ERROR("PSP load sos failed!\n");
2168 return ret;
2169 }
2170 }
2171 }
2172
2173 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2174 if (ret) {
2175 DRM_ERROR("PSP create ring failed!\n");
2176 return ret;
2177 }
2178
2179 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2180 goto skip_pin_bo;
2181
2182 if (!psp_boottime_tmr(psp)) {
2183 ret = psp_tmr_init(psp);
2184 if (ret) {
2185 DRM_ERROR("PSP tmr init failed!\n");
2186 return ret;
2187 }
2188 }
2189
2190 skip_pin_bo:
2191 /*
2192 * For ASICs with DF Cstate management centralized
2193 * to PMFW, TMR setup should be performed after the PMFW is
2194 * loaded and before other non-PSP firmware is loaded.
2195 */
2196 if (psp->pmfw_centralized_cstate_management) {
2197 ret = psp_load_smu_fw(psp);
2198 if (ret)
2199 return ret;
2200 }
2201
2202 ret = psp_tmr_load(psp);
2203 if (ret) {
2204 DRM_ERROR("PSP load tmr failed!\n");
2205 return ret;
2206 }
2207
2208 return 0;
2209 }
2210
2211 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2212 enum psp_gfx_fw_type *type)
2213 {
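/* Translate the driver-side AMDGPU_UCODE_ID_* into the PSP GFX firmware
 * type carried in the LOAD_IP_FW command; unknown IDs are rejected.
 */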
2214 switch (ucode->ucode_id) {
2215 case AMDGPU_UCODE_ID_CAP:
2216 *type = GFX_FW_TYPE_CAP;
2217 break;
2218 case AMDGPU_UCODE_ID_SDMA0:
2219 *type = GFX_FW_TYPE_SDMA0;
2220 break;
2221 case AMDGPU_UCODE_ID_SDMA1:
2222 *type = GFX_FW_TYPE_SDMA1;
2223 break;
2224 case AMDGPU_UCODE_ID_SDMA2:
2225 *type = GFX_FW_TYPE_SDMA2;
2226 break;
2227 case AMDGPU_UCODE_ID_SDMA3:
2228 *type = GFX_FW_TYPE_SDMA3;
2229 break;
2230 case AMDGPU_UCODE_ID_SDMA4:
2231 *type = GFX_FW_TYPE_SDMA4;
2232 break;
2233 case AMDGPU_UCODE_ID_SDMA5:
2234 *type = GFX_FW_TYPE_SDMA5;
2235 break;
2236 case AMDGPU_UCODE_ID_SDMA6:
2237 *type = GFX_FW_TYPE_SDMA6;
2238 break;
2239 case AMDGPU_UCODE_ID_SDMA7:
2240 *type = GFX_FW_TYPE_SDMA7;
2241 break;
2242 case AMDGPU_UCODE_ID_CP_MES:
2243 *type = GFX_FW_TYPE_CP_MES;
2244 break;
2245 case AMDGPU_UCODE_ID_CP_MES_DATA:
2246 *type = GFX_FW_TYPE_MES_STACK;
2247 break;
2248 case AMDGPU_UCODE_ID_CP_MES1:
2249 *type = GFX_FW_TYPE_CP_MES_KIQ;
2250 break;
2251 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2252 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2253 break;
2254 case AMDGPU_UCODE_ID_CP_CE:
2255 *type = GFX_FW_TYPE_CP_CE;
2256 break;
2257 case AMDGPU_UCODE_ID_CP_PFP:
2258 *type = GFX_FW_TYPE_CP_PFP;
2259 break;
2260 case AMDGPU_UCODE_ID_CP_ME:
2261 *type = GFX_FW_TYPE_CP_ME;
2262 break;
2263 case AMDGPU_UCODE_ID_CP_MEC1:
2264 *type = GFX_FW_TYPE_CP_MEC;
2265 break;
2266 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2267 *type = GFX_FW_TYPE_CP_MEC_ME1;
2268 break;
2269 case AMDGPU_UCODE_ID_CP_MEC2:
2270 *type = GFX_FW_TYPE_CP_MEC;
2271 break;
2272 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2273 *type = GFX_FW_TYPE_CP_MEC_ME2;
2274 break;
2275 case AMDGPU_UCODE_ID_RLC_P:
2276 *type = GFX_FW_TYPE_RLC_P;
2277 break;
2278 case AMDGPU_UCODE_ID_RLC_V:
2279 *type = GFX_FW_TYPE_RLC_V;
2280 break;
2281 case AMDGPU_UCODE_ID_RLC_G:
2282 *type = GFX_FW_TYPE_RLC_G;
2283 break;
2284 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2285 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2286 break;
2287 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2288 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2289 break;
2290 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2291 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2292 break;
2293 case AMDGPU_UCODE_ID_RLC_IRAM:
2294 *type = GFX_FW_TYPE_RLC_IRAM;
2295 break;
2296 case AMDGPU_UCODE_ID_RLC_DRAM:
2297 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2298 break;
2299 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2300 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2301 break;
2302 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2303 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2304 break;
2305 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2306 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2307 break;
2308 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2309 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2310 break;
2311 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2312 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2313 break;
2314 case AMDGPU_UCODE_ID_SMC:
2315 *type = GFX_FW_TYPE_SMU;
2316 break;
2317 case AMDGPU_UCODE_ID_PPTABLE:
2318 *type = GFX_FW_TYPE_PPTABLE;
2319 break;
2320 case AMDGPU_UCODE_ID_UVD:
2321 *type = GFX_FW_TYPE_UVD;
2322 break;
2323 case AMDGPU_UCODE_ID_UVD1:
2324 *type = GFX_FW_TYPE_UVD1;
2325 break;
2326 case AMDGPU_UCODE_ID_VCE:
2327 *type = GFX_FW_TYPE_VCE;
2328 break;
2329 case AMDGPU_UCODE_ID_VCN:
2330 *type = GFX_FW_TYPE_VCN;
2331 break;
2332 case AMDGPU_UCODE_ID_VCN1:
2333 *type = GFX_FW_TYPE_VCN1;
2334 break;
2335 case AMDGPU_UCODE_ID_DMCU_ERAM:
2336 *type = GFX_FW_TYPE_DMCU_ERAM;
2337 break;
2338 case AMDGPU_UCODE_ID_DMCU_INTV:
2339 *type = GFX_FW_TYPE_DMCU_ISR;
2340 break;
2341 case AMDGPU_UCODE_ID_VCN0_RAM:
2342 *type = GFX_FW_TYPE_VCN0_RAM;
2343 break;
2344 case AMDGPU_UCODE_ID_VCN1_RAM:
2345 *type = GFX_FW_TYPE_VCN1_RAM;
2346 break;
2347 case AMDGPU_UCODE_ID_DMCUB:
2348 *type = GFX_FW_TYPE_DMUB;
2349 break;
2350 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2351 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2352 break;
2353 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2354 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2355 break;
2356 case AMDGPU_UCODE_ID_IMU_I:
2357 *type = GFX_FW_TYPE_IMU_I;
2358 break;
2359 case AMDGPU_UCODE_ID_IMU_D:
2360 *type = GFX_FW_TYPE_IMU_D;
2361 break;
2362 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2363 *type = GFX_FW_TYPE_RS64_PFP;
2364 break;
2365 case AMDGPU_UCODE_ID_CP_RS64_ME:
2366 *type = GFX_FW_TYPE_RS64_ME;
2367 break;
2368 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2369 *type = GFX_FW_TYPE_RS64_MEC;
2370 break;
2371 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2372 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2373 break;
2374 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2375 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2376 break;
2377 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2378 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2379 break;
2380 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2381 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2382 break;
2383 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2384 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2385 break;
2386 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2387 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2388 break;
2389 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2390 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2391 break;
2392 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2393 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2394 break;
2395 case AMDGPU_UCODE_ID_MAXIMUM:
2396 default:
2397 return -EINVAL;
2398 }
2399
2400 return 0;
2401 }
2402
2403 static void psp_print_fw_hdr(struct psp_context *psp,
2404 struct amdgpu_firmware_info *ucode)
2405 {
2406 struct amdgpu_device *adev = psp->adev;
2407 struct common_firmware_header *hdr;
2408
2409 switch (ucode->ucode_id) {
2410 case AMDGPU_UCODE_ID_SDMA0:
2411 case AMDGPU_UCODE_ID_SDMA1:
2412 case AMDGPU_UCODE_ID_SDMA2:
2413 case AMDGPU_UCODE_ID_SDMA3:
2414 case AMDGPU_UCODE_ID_SDMA4:
2415 case AMDGPU_UCODE_ID_SDMA5:
2416 case AMDGPU_UCODE_ID_SDMA6:
2417 case AMDGPU_UCODE_ID_SDMA7:
2418 hdr = (struct common_firmware_header *)
2419 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2420 amdgpu_ucode_print_sdma_hdr(hdr);
2421 break;
2422 case AMDGPU_UCODE_ID_CP_CE:
2423 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2424 amdgpu_ucode_print_gfx_hdr(hdr);
2425 break;
2426 case AMDGPU_UCODE_ID_CP_PFP:
2427 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2428 amdgpu_ucode_print_gfx_hdr(hdr);
2429 break;
2430 case AMDGPU_UCODE_ID_CP_ME:
2431 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2432 amdgpu_ucode_print_gfx_hdr(hdr);
2433 break;
2434 case AMDGPU_UCODE_ID_CP_MEC1:
2435 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2436 amdgpu_ucode_print_gfx_hdr(hdr);
2437 break;
2438 case AMDGPU_UCODE_ID_RLC_G:
2439 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2440 amdgpu_ucode_print_rlc_hdr(hdr);
2441 break;
2442 case AMDGPU_UCODE_ID_SMC:
2443 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2444 amdgpu_ucode_print_smc_hdr(hdr);
2445 break;
2446 default:
2447 break;
2448 }
2449 }
2450
2451 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
2452 struct psp_gfx_cmd_resp *cmd)
2453 {
2454 int ret;
2455 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2456
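/* The LOAD_IP_FW command carries the firmware's GPU (MC) address split
 * into lo/hi 32-bit halves, its size, and the PSP firmware type resolved
 * from the ucode ID below.
 */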
2457 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2458 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2459 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2460 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2461
2462 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2463 if (ret)
2464 DRM_ERROR("Unknown firmware type\n");
2465
2466 return ret;
2467 }
2468
2469 int psp_execute_ip_fw_load(struct psp_context *psp,
2470 struct amdgpu_firmware_info *ucode)
2471 {
2472 int ret = 0;
2473 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2474
2475 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
2476 if (!ret) {
2477 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2478 psp->fence_buf_mc_addr);
2479 }
2480
2481 release_psp_cmd_buf(psp);
2482
2483 return ret;
2484 }
2485
2486 static int psp_load_smu_fw(struct psp_context *psp)
2487 {
2488 int ret;
2489 struct amdgpu_device *adev = psp->adev;
2490 struct amdgpu_firmware_info *ucode =
2491 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2492 struct amdgpu_ras *ras = psp->ras_context.ras;
2493
2494 /*
2495 * Skip SMU FW reloading when BACO is used for runtime PM only,
2496 * as the SMU is always alive.
2497 */
2498 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2499 return 0;
2500
2501 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2502 return 0;
2503
2504 if ((amdgpu_in_reset(adev) &&
2505 ras && adev->ras_enabled &&
2506 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
2507 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
2508 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2509 if (ret)
2510 DRM_WARN("Failed to set MP1 state prepare for reload\n");
2511 }
2512
2513 ret = psp_execute_ip_fw_load(psp, ucode);
2514
2515 if (ret)
2516 DRM_ERROR("PSP load smu failed!\n");
2517
2518 return ret;
2519 }
2520
2521 static bool fw_load_skip_check(struct psp_context *psp,
2522 struct amdgpu_firmware_info *ucode)
2523 {
2524 if (!ucode->fw || !ucode->ucode_size)
2525 return true;
2526
2527 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2528 (psp_smu_reload_quirk(psp) ||
2529 psp->autoload_supported ||
2530 psp->pmfw_centralized_cstate_management))
2531 return true;
2532
2533 if (amdgpu_sriov_vf(psp->adev) &&
2534 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2535 return true;
2536
2537 if (psp->autoload_supported &&
2538 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2539 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2540 /* skip mec JT when autoload is enabled */
2541 return true;
2542
2543 return false;
2544 }
2545
2546 int psp_load_fw_list(struct psp_context *psp,
2547 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2548 {
2549 int ret = 0, i;
2550 struct amdgpu_firmware_info *ucode;
2551
2552 for (i = 0; i < ucode_count; ++i) {
2553 ucode = ucode_list[i];
2554 psp_print_fw_hdr(psp, ucode);
2555 ret = psp_execute_ip_fw_load(psp, ucode);
2556 if (ret)
2557 return ret;
2558 }
2559 return ret;
2560 }
2561
2562 static int psp_load_non_psp_fw(struct psp_context *psp)
2563 {
2564 int i, ret;
2565 struct amdgpu_firmware_info *ucode;
2566 struct amdgpu_device *adev = psp->adev;
2567
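/* With RLC autoload and without PMFW-centralized DF C-state management,
 * the SMU firmware is loaded first; otherwise it is either loaded earlier
 * in psp_hw_start() or handled when the SMC entry is reached in the list
 * below.
 */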
2568 if (psp->autoload_supported &&
2569 !psp->pmfw_centralized_cstate_management) {
2570 ret = psp_load_smu_fw(psp);
2571 if (ret)
2572 return ret;
2573 }
2574
2575 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2576 ucode = &adev->firmware.ucode[i];
2577
2578 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2579 !fw_load_skip_check(psp, ucode)) {
2580 ret = psp_load_smu_fw(psp);
2581 if (ret)
2582 return ret;
2583 continue;
2584 }
2585
2586 if (fw_load_skip_check(psp, ucode))
2587 continue;
2588
2589 if (psp->autoload_supported &&
2590 (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
2591 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
2592 adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
2593 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2594 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2595 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2596 /* PSP only receives one SDMA fw for sienna_cichlid,
2597 * as all four SDMA firmwares are the same
2598 */
2599 continue;
2600
2601 psp_print_fw_hdr(psp, ucode);
2602
2603 ret = psp_execute_ip_fw_load(psp, ucode);
2604 if (ret)
2605 return ret;
2606
2607 /* Start RLC autoload after the PSP has received all the GFX firmware */
2608 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2609 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2610 ret = psp_rlc_autoload_start(psp);
2611 if (ret) {
2612 DRM_ERROR("Failed to start rlc autoload\n");
2613 return ret;
2614 }
2615 }
2616 }
2617
2618 return 0;
2619 }
2620
2621 static int psp_load_fw(struct amdgpu_device *adev)
2622 {
2623 int ret;
2624 struct psp_context *psp = &adev->psp;
2625
2626 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2627 /* should not destroy ring, only stop */
2628 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2629 } else {
2630 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2631
2632 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2633 if (ret) {
2634 DRM_ERROR("PSP ring init failed!\n");
2635 goto failed;
2636 }
2637 }
2638
2639 ret = psp_hw_start(psp);
2640 if (ret)
2641 goto failed;
2642
2643 ret = psp_load_non_psp_fw(psp);
2644 if (ret)
2645 goto failed1;
2646
2647 ret = psp_asd_initialize(psp);
2648 if (ret) {
2649 DRM_ERROR("PSP load asd failed!\n");
2650 goto failed1;
2651 }
2652
2653 ret = psp_rl_load(adev);
2654 if (ret) {
2655 DRM_ERROR("PSP load RL failed!\n");
2656 goto failed1;
2657 }
2658
2659 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2660 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2661 ret = psp_xgmi_initialize(psp, false, true);
2662 /* Warn on XGMI session initialization failure
2663 * instead of stopping driver initialization
2664 */
2665 if (ret)
2666 dev_err(psp->adev->dev,
2667 "XGMI: Failed to initialize XGMI session\n");
2668 }
2669 }
2670
2671 if (psp->ta_fw) {
2672 ret = psp_ras_initialize(psp);
2673 if (ret)
2674 dev_err(psp->adev->dev,
2675 "RAS: Failed to initialize RAS\n");
2676
2677 ret = psp_hdcp_initialize(psp);
2678 if (ret)
2679 dev_err(psp->adev->dev,
2680 "HDCP: Failed to initialize HDCP\n");
2681
2682 ret = psp_dtm_initialize(psp);
2683 if (ret)
2684 dev_err(psp->adev->dev,
2685 "DTM: Failed to initialize DTM\n");
2686
2687 ret = psp_rap_initialize(psp);
2688 if (ret)
2689 dev_err(psp->adev->dev,
2690 "RAP: Failed to initialize RAP\n");
2691
2692 ret = psp_securedisplay_initialize(psp);
2693 if (ret)
2694 dev_err(psp->adev->dev,
2695 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2696 }
2697
2698 return 0;
2699
2700 failed1:
2701 psp_free_shared_bufs(psp);
2702 failed:
2703 /*
2704 * all cleanup jobs (xgmi terminate, ras terminate,
2705 * ring destroy, cmd/fence/fw buffer destroy,
2706 * psp->cmd destroy) are delayed to psp_hw_fini
2707 */
2708 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2709 return ret;
2710 }
2711
2712 static int psp_hw_init(void *handle)
2713 {
2714 int ret;
2715 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2716
2717 mutex_lock(&adev->firmware.mutex);
2718 /*
2719 * This sequence is only used once, during hw_init; it is not
2720 * needed on resume.
2721 */
2722 ret = amdgpu_ucode_init_bo(adev);
2723 if (ret)
2724 goto failed;
2725
2726 ret = psp_load_fw(adev);
2727 if (ret) {
2728 DRM_ERROR("PSP firmware loading failed\n");
2729 goto failed;
2730 }
2731
2732 mutex_unlock(&adev->firmware.mutex);
2733 return 0;
2734
2735 failed:
2736 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2737 mutex_unlock(&adev->firmware.mutex);
2738 return -EINVAL;
2739 }
2740
2741 static int psp_hw_fini(void *handle)
2742 {
2743 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2744 struct psp_context *psp = &adev->psp;
2745
2746 if (psp->ta_fw) {
2747 psp_ras_terminate(psp);
2748 psp_securedisplay_terminate(psp);
2749 psp_rap_terminate(psp);
2750 psp_dtm_terminate(psp);
2751 psp_hdcp_terminate(psp);
2752
2753 if (adev->gmc.xgmi.num_physical_nodes > 1)
2754 psp_xgmi_terminate(psp);
2755 }
2756
2757 psp_asd_terminate(psp);
2758 psp_tmr_terminate(psp);
2759
2760 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2761
2762 return 0;
2763 }
2764
2765 static int psp_suspend(void *handle)
2766 {
2767 int ret = 0;
2768 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2769 struct psp_context *psp = &adev->psp;
2770
2771 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2772 psp->xgmi_context.context.initialized) {
2773 ret = psp_xgmi_terminate(psp);
2774 if (ret) {
2775 DRM_ERROR("Failed to terminate xgmi ta\n");
2776 goto out;
2777 }
2778 }
2779
2780 if (psp->ta_fw) {
2781 ret = psp_ras_terminate(psp);
2782 if (ret) {
2783 DRM_ERROR("Failed to terminate ras ta\n");
2784 goto out;
2785 }
2786 ret = psp_hdcp_terminate(psp);
2787 if (ret) {
2788 DRM_ERROR("Failed to terminate hdcp ta\n");
2789 goto out;
2790 }
2791 ret = psp_dtm_terminate(psp);
2792 if (ret) {
2793 DRM_ERROR("Failed to terminate dtm ta\n");
2794 goto out;
2795 }
2796 ret = psp_rap_terminate(psp);
2797 if (ret) {
2798 DRM_ERROR("Failed to terminate rap ta\n");
2799 goto out;
2800 }
2801 ret = psp_securedisplay_terminate(psp);
2802 if (ret) {
2803 DRM_ERROR("Failed to terminate securedisplay ta\n");
2804 goto out;
2805 }
2806 }
2807
2808 ret = psp_asd_terminate(psp);
2809 if (ret) {
2810 DRM_ERROR("Failed to terminate asd\n");
2811 goto out;
2812 }
2813
2814 ret = psp_tmr_terminate(psp);
2815 if (ret) {
2816 DRM_ERROR("Failed to terminate tmr\n");
2817 goto out;
2818 }
2819
2820 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2821 if (ret)
2822 DRM_ERROR("PSP ring stop failed\n");
2823
2824 out:
2825 return ret;
2826 }
2827
2828 static int psp_resume(void *handle)
2829 {
2830 int ret;
2831 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2832 struct psp_context *psp = &adev->psp;
2833
2834 DRM_INFO("PSP is resuming...\n");
2835
2836 if (psp->mem_train_ctx.enable_mem_training) {
2837 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2838 if (ret) {
2839 DRM_ERROR("Failed to process memory training!\n");
2840 return ret;
2841 }
2842 }
2843
2844 mutex_lock(&adev->firmware.mutex);
2845
2846 ret = psp_hw_start(psp);
2847 if (ret)
2848 goto failed;
2849
2850 ret = psp_load_non_psp_fw(psp);
2851 if (ret)
2852 goto failed;
2853
2854 ret = psp_asd_initialize(psp);
2855 if (ret) {
2856 DRM_ERROR("PSP load asd failed!\n");
2857 goto failed;
2858 }
2859
2860 ret = psp_rl_load(adev);
2861 if (ret) {
2862 dev_err(adev->dev, "PSP load RL failed!\n");
2863 goto failed;
2864 }
2865
2866 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2867 ret = psp_xgmi_initialize(psp, false, true);
2868 /* Warn on XGMI session initialization failure
2869 * instead of stopping driver initialization
2870 */
2871 if (ret)
2872 dev_err(psp->adev->dev,
2873 "XGMI: Failed to initialize XGMI session\n");
2874 }
2875
2876 if (psp->ta_fw) {
2877 ret = psp_ras_initialize(psp);
2878 if (ret)
2879 dev_err(psp->adev->dev,
2880 "RAS: Failed to initialize RAS\n");
2881
2882 ret = psp_hdcp_initialize(psp);
2883 if (ret)
2884 dev_err(psp->adev->dev,
2885 "HDCP: Failed to initialize HDCP\n");
2886
2887 ret = psp_dtm_initialize(psp);
2888 if (ret)
2889 dev_err(psp->adev->dev,
2890 "DTM: Failed to initialize DTM\n");
2891
2892 ret = psp_rap_initialize(psp);
2893 if (ret)
2894 dev_err(psp->adev->dev,
2895 "RAP: Failed to initialize RAP\n");
2896
2897 ret = psp_securedisplay_initialize(psp);
2898 if (ret)
2899 dev_err(psp->adev->dev,
2900 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2901 }
2902
2903 mutex_unlock(&adev->firmware.mutex);
2904
2905 return 0;
2906
2907 failed:
2908 DRM_ERROR("PSP resume failed\n");
2909 mutex_unlock(&adev->firmware.mutex);
2910 return ret;
2911 }
2912
2913 int psp_gpu_reset(struct amdgpu_device *adev)
2914 {
2915 int ret;
2916
2917 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2918 return 0;
2919
2920 mutex_lock(&adev->psp.mutex);
2921 ret = psp_mode1_reset(&adev->psp);
2922 mutex_unlock(&adev->psp.mutex);
2923
2924 return ret;
2925 }
2926
2927 int psp_rlc_autoload_start(struct psp_context *psp)
2928 {
2929 int ret;
2930 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2931
2932 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2933
2934 ret = psp_cmd_submit_buf(psp, NULL, cmd,
2935 psp->fence_buf_mc_addr);
2936
2937 release_psp_cmd_buf(psp);
2938
2939 return ret;
2940 }
2941
2942 int psp_ring_cmd_submit(struct psp_context *psp,
2943 uint64_t cmd_buf_mc_addr,
2944 uint64_t fence_mc_addr,
2945 int index)
2946 {
2947 unsigned int psp_write_ptr_reg = 0;
2948 struct psp_gfx_rb_frame *write_frame;
2949 struct psp_ring *ring = &psp->km_ring;
2950 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2951 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2952 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2953 struct amdgpu_device *adev = psp->adev;
2954 uint32_t ring_size_dw = ring->ring_size / 4;
2955 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2956
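/* The PSP write pointer counts DWORDs: dividing it by the frame size in
 * DWORDs yields the frame slot, and a pointer at a multiple of the ring
 * size wraps to the ring start. Worked example (sizes assumed purely for
 * illustration): a 0x1000-byte ring is 1024 DWORDs; with a 64-byte frame
 * (16 DWORDs), pointer 32 selects frame 2 and pointer 0 selects frame 0.
 */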
2957 /* KM (GPCOM) prepare write pointer */
2958 psp_write_ptr_reg = psp_ring_get_wptr(psp);
2959
2960 /* Update KM RB frame pointer to new frame */
2961 /* write_frame ptr increments by size of rb_frame in bytes */
2962 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
2963 if ((psp_write_ptr_reg % ring_size_dw) == 0)
2964 write_frame = ring_buffer_start;
2965 else
2966 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2967 /* Check invalid write_frame ptr address */
2968 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2969 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2970 ring_buffer_start, ring_buffer_end, write_frame);
2971 DRM_ERROR("write_frame is pointing to address out of bounds\n");
2972 return -EINVAL;
2973 }
2974
2975 /* Initialize KM RB frame */
2976 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2977
2978 /* Update KM RB frame */
2979 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2980 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2981 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2982 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2983 write_frame->fence_value = index;
2984 amdgpu_device_flush_hdp(adev, NULL);
2985
2986 /* Update the write Pointer in DWORDs */
2987 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2988 psp_ring_set_wptr(psp, psp_write_ptr_reg);
2989 return 0;
2990 }
2991
2992 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
2993 {
2994 struct amdgpu_device *adev = psp->adev;
2995 char fw_name[PSP_FW_NAME_LEN];
2996 const struct psp_firmware_header_v1_0 *asd_hdr;
2997 int err = 0;
2998
2999 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3000 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3001 if (err)
3002 goto out;
3003
3004 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3005 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3006 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3007 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3008 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3009 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3010 return 0;
3011 out:
3012 amdgpu_ucode_release(&adev->psp.asd_fw);
3013 return err;
3014 }
3015
3016 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3017 {
3018 struct amdgpu_device *adev = psp->adev;
3019 char fw_name[PSP_FW_NAME_LEN];
3020 const struct psp_firmware_header_v1_0 *toc_hdr;
3021 int err = 0;
3022
3023 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3024 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3025 if (err)
3026 goto out;
3027
3028 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3029 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3030 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3031 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3032 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3033 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3034 return 0;
3035 out:
3036 amdgpu_ucode_release(&adev->psp.toc_fw);
3037 return err;
3038 }
3039
3040 static int parse_sos_bin_descriptor(struct psp_context *psp,
3041 const struct psp_fw_bin_desc *desc,
3042 const struct psp_firmware_header_v2_0 *sos_hdr)
3043 {
3044 uint8_t *ucode_start_addr = NULL;
3045
3046 if (!psp || !desc || !sos_hdr)
3047 return -EINVAL;
3048
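/* Each descriptor locates one packed binary: its start address is the SOS
 * header base plus the descriptor offset plus the header's ucode array
 * offset, while version and size come straight from the descriptor fields.
 */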
3049 ucode_start_addr = (uint8_t *)sos_hdr +
3050 le32_to_cpu(desc->offset_bytes) +
3051 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3052
3053 switch (desc->fw_type) {
3054 case PSP_FW_TYPE_PSP_SOS:
3055 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3056 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3057 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3058 psp->sos.start_addr = ucode_start_addr;
3059 break;
3060 case PSP_FW_TYPE_PSP_SYS_DRV:
3061 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3062 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3063 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3064 psp->sys.start_addr = ucode_start_addr;
3065 break;
3066 case PSP_FW_TYPE_PSP_KDB:
3067 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3068 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3069 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3070 psp->kdb.start_addr = ucode_start_addr;
3071 break;
3072 case PSP_FW_TYPE_PSP_TOC:
3073 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3074 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3075 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3076 psp->toc.start_addr = ucode_start_addr;
3077 break;
3078 case PSP_FW_TYPE_PSP_SPL:
3079 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3080 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3081 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3082 psp->spl.start_addr = ucode_start_addr;
3083 break;
3084 case PSP_FW_TYPE_PSP_RL:
3085 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3086 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3087 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3088 psp->rl.start_addr = ucode_start_addr;
3089 break;
3090 case PSP_FW_TYPE_PSP_SOC_DRV:
3091 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3092 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3093 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3094 psp->soc_drv.start_addr = ucode_start_addr;
3095 break;
3096 case PSP_FW_TYPE_PSP_INTF_DRV:
3097 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3098 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3099 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3100 psp->intf_drv.start_addr = ucode_start_addr;
3101 break;
3102 case PSP_FW_TYPE_PSP_DBG_DRV:
3103 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3104 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3105 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3106 psp->dbg_drv.start_addr = ucode_start_addr;
3107 break;
3108 case PSP_FW_TYPE_PSP_RAS_DRV:
3109 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3110 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3111 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3112 psp->ras_drv.start_addr = ucode_start_addr;
3113 break;
3114 default:
3115 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3116 break;
3117 }
3118
3119 return 0;
3120 }
3121
3122 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3123 {
3124 const struct psp_firmware_header_v1_0 *sos_hdr;
3125 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3126 uint8_t *ucode_array_start_addr;
3127
3128 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3129 ucode_array_start_addr = (uint8_t *)sos_hdr +
3130 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3131
3132 if (adev->gmc.xgmi.connected_to_cpu ||
3133 (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {
3134 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3135 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3136
3137 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3138 adev->psp.sys.start_addr = ucode_array_start_addr;
3139
3140 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3141 adev->psp.sos.start_addr = ucode_array_start_addr +
3142 le32_to_cpu(sos_hdr->sos.offset_bytes);
3143 } else {
3144 /* Load alternate PSP SOS FW */
3145 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3146
3147 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3148 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3149
3150 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3151 adev->psp.sys.start_addr = ucode_array_start_addr +
3152 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3153
3154 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3155 adev->psp.sos.start_addr = ucode_array_start_addr +
3156 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3157 }
3158
3159 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3160 dev_warn(adev->dev, "PSP SOS FW not available");
3161 return -EINVAL;
3162 }
3163
3164 return 0;
3165 }
3166
3167 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3168 {
3169 struct amdgpu_device *adev = psp->adev;
3170 char fw_name[PSP_FW_NAME_LEN];
3171 const struct psp_firmware_header_v1_0 *sos_hdr;
3172 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3173 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3174 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3175 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3176 int err = 0;
3177 uint8_t *ucode_array_start_addr;
3178 int fw_index = 0;
3179
3180 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3181 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3182 if (err)
3183 goto out;
3184
3185 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3186 ucode_array_start_addr = (uint8_t *)sos_hdr +
3187 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3188 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3189
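/* Header major version 1 keeps the legacy fixed layout, with minor
 * versions 1-3 adding TOC, KDB, SPL and RL sections; major version 2
 * switches to an array of psp_fw_bin_desc entries parsed one by one below.
 */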
3190 switch (sos_hdr->header.header_version_major) {
3191 case 1:
3192 err = psp_init_sos_base_fw(adev);
3193 if (err)
3194 goto out;
3195
3196 if (sos_hdr->header.header_version_minor == 1) {
3197 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3198 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3199 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3200 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3201 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3202 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3203 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3204 }
3205 if (sos_hdr->header.header_version_minor == 2) {
3206 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3207 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3208 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3209 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3210 }
3211 if (sos_hdr->header.header_version_minor == 3) {
3212 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3213 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3214 adev->psp.toc.start_addr = ucode_array_start_addr +
3215 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3216 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3217 adev->psp.kdb.start_addr = ucode_array_start_addr +
3218 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3219 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3220 adev->psp.spl.start_addr = ucode_array_start_addr +
3221 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3222 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3223 adev->psp.rl.start_addr = ucode_array_start_addr +
3224 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3225 }
3226 break;
3227 case 2:
3228 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3229
3230 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3231 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3232 err = -EINVAL;
3233 goto out;
3234 }
3235
3236 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3237 err = parse_sos_bin_descriptor(psp,
3238 &sos_hdr_v2_0->psp_fw_bin[fw_index],
3239 sos_hdr_v2_0);
3240 if (err)
3241 goto out;
3242 }
3243 break;
3244 default:
3245 dev_err(adev->dev,
3246 "unsupported psp sos firmware\n");
3247 err = -EINVAL;
3248 goto out;
3249 }
3250
3251 return 0;
3252 out:
3253 amdgpu_ucode_release(&adev->psp.sos_fw);
3254
3255 return err;
3256 }
3257
3258 static int parse_ta_bin_descriptor(struct psp_context *psp,
3259 const struct psp_fw_bin_desc *desc,
3260 const struct ta_firmware_header_v2_0 *ta_hdr)
3261 {
3262 uint8_t *ucode_start_addr = NULL;
3263
3264 if (!psp || !desc || !ta_hdr)
3265 return -EINVAL;
3266
3267 ucode_start_addr = (uint8_t *)ta_hdr +
3268 le32_to_cpu(desc->offset_bytes) +
3269 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3270
3271 switch (desc->fw_type) {
3272 case TA_FW_TYPE_PSP_ASD:
3273 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3274 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3275 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3276 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3277 break;
3278 case TA_FW_TYPE_PSP_XGMI:
3279 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3280 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3281 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3282 break;
3283 case TA_FW_TYPE_PSP_RAS:
3284 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3285 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3286 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3287 break;
3288 case TA_FW_TYPE_PSP_HDCP:
3289 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3290 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3291 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3292 break;
3293 case TA_FW_TYPE_PSP_DTM:
3294 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3295 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3296 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3297 break;
3298 case TA_FW_TYPE_PSP_RAP:
3299 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3300 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3301 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3302 break;
3303 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3304 psp->securedisplay_context.context.bin_desc.fw_version =
3305 le32_to_cpu(desc->fw_version);
3306 psp->securedisplay_context.context.bin_desc.size_bytes =
3307 le32_to_cpu(desc->size_bytes);
3308 psp->securedisplay_context.context.bin_desc.start_addr =
3309 ucode_start_addr;
3310 break;
3311 default:
3312 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3313 break;
3314 }
3315
3316 return 0;
3317 }
3318
3319 static int parse_ta_v1_microcode(struct psp_context *psp)
3320 {
3321 const struct ta_firmware_header_v1_0 *ta_hdr;
3322 struct amdgpu_device *adev = psp->adev;
3323
3324 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3325
3326 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3327 return -EINVAL;
3328
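/* Version 1 TA images use a fixed layout: XGMI sits at the start of the
 * ucode array with RAS placed relative to it, while HDCP anchors DTM and
 * SECUREDISPLAY via their respective offsets.
 */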
3329 adev->psp.xgmi_context.context.bin_desc.fw_version =
3330 le32_to_cpu(ta_hdr->xgmi.fw_version);
3331 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3332 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3333 adev->psp.xgmi_context.context.bin_desc.start_addr =
3334 (uint8_t *)ta_hdr +
3335 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3336
3337 adev->psp.ras_context.context.bin_desc.fw_version =
3338 le32_to_cpu(ta_hdr->ras.fw_version);
3339 adev->psp.ras_context.context.bin_desc.size_bytes =
3340 le32_to_cpu(ta_hdr->ras.size_bytes);
3341 adev->psp.ras_context.context.bin_desc.start_addr =
3342 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3343 le32_to_cpu(ta_hdr->ras.offset_bytes);
3344
3345 adev->psp.hdcp_context.context.bin_desc.fw_version =
3346 le32_to_cpu(ta_hdr->hdcp.fw_version);
3347 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3348 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3349 adev->psp.hdcp_context.context.bin_desc.start_addr =
3350 (uint8_t *)ta_hdr +
3351 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3352
3353 adev->psp.dtm_context.context.bin_desc.fw_version =
3354 le32_to_cpu(ta_hdr->dtm.fw_version);
3355 adev->psp.dtm_context.context.bin_desc.size_bytes =
3356 le32_to_cpu(ta_hdr->dtm.size_bytes);
3357 adev->psp.dtm_context.context.bin_desc.start_addr =
3358 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3359 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3360
3361 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3362 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3363 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3364 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3365 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3366 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3367 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3368
3369 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3370
3371 return 0;
3372 }
3373
3374 static int parse_ta_v2_microcode(struct psp_context *psp)
3375 {
3376 const struct ta_firmware_header_v2_0 *ta_hdr;
3377 struct amdgpu_device *adev = psp->adev;
3378 int err = 0;
3379 int ta_index = 0;
3380
3381 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3382
3383 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3384 return -EINVAL;
3385
3386 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3387 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3388 return -EINVAL;
3389 }
3390
3391 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3392 err = parse_ta_bin_descriptor(psp,
3393 &ta_hdr->ta_fw_bin[ta_index],
3394 ta_hdr);
3395 if (err)
3396 return err;
3397 }
3398
3399 return 0;
3400 }
3401
3402 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3403 {
3404 const struct common_firmware_header *hdr;
3405 struct amdgpu_device *adev = psp->adev;
3406 char fw_name[PSP_FW_NAME_LEN];
3407 int err;
3408
3409 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3410 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3411 if (err)
3412 return err;
3413
3414 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3415 switch (le16_to_cpu(hdr->header_version_major)) {
3416 case 1:
3417 err = parse_ta_v1_microcode(psp);
3418 break;
3419 case 2:
3420 err = parse_ta_v2_microcode(psp);
3421 break;
3422 default:
3423 dev_err(adev->dev, "unsupported TA header version\n");
3424 err = -EINVAL;
3425 }
3426
3427 if (err)
3428 amdgpu_ucode_release(&adev->psp.ta_fw);
3429
3430 return err;
3431 }
3432
3433 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3434 {
3435 struct amdgpu_device *adev = psp->adev;
3436 char fw_name[PSP_FW_NAME_LEN];
3437 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3438 struct amdgpu_firmware_info *info = NULL;
3439 int err = 0;
3440
3441 if (!amdgpu_sriov_vf(adev)) {
3442 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3443 return -EINVAL;
3444 }
3445
3446 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3447 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3448 if (err) {
3449 if (err == -ENODEV) {
3450 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3451 err = 0;
3452 } else {
3453 dev_err(adev->dev, "fail to initialize cap microcode\n");
3454 }
3455 goto out;
3456 }
3457
3458 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3459 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3460 info->fw = adev->psp.cap_fw;
3461 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3462 adev->psp.cap_fw->data;
3463 adev->firmware.fw_size += ALIGN(
3464 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3465 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3466 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3467 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3468
3469 return 0;
3470
3471 out:
3472 amdgpu_ucode_release(&adev->psp.cap_fw);
3473 return err;
3474 }
3475
3476 static int psp_set_clockgating_state(void *handle,
3477 enum amd_clockgating_state state)
3478 {
3479 return 0;
3480 }
3481
3482 static int psp_set_powergating_state(void *handle,
3483 enum amd_powergating_state state)
3484 {
3485 return 0;
3486 }
3487
3488 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3489 struct device_attribute *attr,
3490 char *buf)
3491 {
3492 struct drm_device *ddev = dev_get_drvdata(dev);
3493 struct amdgpu_device *adev = drm_to_adev(ddev);
3494 uint32_t fw_ver;
3495 int ret;
3496
3497 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3498 DRM_INFO("PSP block is not ready yet.");
3499 return -EBUSY;
3500 }
3501
3502 mutex_lock(&adev->psp.mutex);
3503 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3504 mutex_unlock(&adev->psp.mutex);
3505
3506 if (ret) {
3507 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
3508 return ret;
3509 }
3510
3511 return sysfs_emit(buf, "%x\n", fw_ver);
3512 }
3513
psp_usbc_pd_fw_sysfs_write(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)3514 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3515 struct device_attribute *attr,
3516 const char *buf,
3517 size_t count)
3518 {
3519 struct drm_device *ddev = dev_get_drvdata(dev);
3520 struct amdgpu_device *adev = drm_to_adev(ddev);
3521 int ret, idx;
3522 char fw_name[100];
3523 const struct firmware *usbc_pd_fw;
3524 struct amdgpu_bo *fw_buf_bo = NULL;
3525 uint64_t fw_pri_mc_addr;
3526 void *fw_pri_cpu_addr;
3527
3528 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3529 DRM_INFO("PSP block is not ready yet.");
3530 return -EBUSY;
3531 }
3532
3533 if (!drm_dev_enter(ddev, &idx))
3534 return -ENODEV;
3535
3536 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3537 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3538 if (ret)
3539 goto fail;
3540
3541 /* LFB address which is aligned to 1MB boundary per PSP request */
3542 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3543 AMDGPU_GEM_DOMAIN_VRAM |
3544 AMDGPU_GEM_DOMAIN_GTT,
3545 &fw_buf_bo, &fw_pri_mc_addr,
3546 &fw_pri_cpu_addr);
3547 if (ret)
3548 goto rel_buf;
3549
3550 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3551
3552 mutex_lock(&adev->psp.mutex);
3553 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3554 mutex_unlock(&adev->psp.mutex);
3555
3556 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3557
3558 rel_buf:
3559 release_firmware(usbc_pd_fw);
3560 fail:
3561 if (ret) {
3562 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
3563 count = ret;
3564 }
3565
3566 drm_dev_exit(idx);
3567 return count;
3568 }
3569
psp_copy_fw(struct psp_context * psp,uint8_t * start_addr,uint32_t bin_size)3570 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3571 {
3572 int idx;
3573
3574 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3575 return;
3576
3577 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3578 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3579
3580 drm_dev_exit(idx);
3581 }
3582
3583 /**
3584 * DOC: usbc_pd_fw
3585 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3586 * this file will trigger the update process.
3587 */
3588 static DEVICE_ATTR(usbc_pd_fw, 0644,
3589 psp_usbc_pd_fw_sysfs_read,
3590 psp_usbc_pd_fw_sysfs_write);
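/*
 * Example usage (a sketch; the card index and the firmware file name are
 * assumptions and depend on the system):
 *
 *   # read the current USB-C PD firmware version
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *
 *   # flash a new image; the written name is resolved under the "amdgpu/"
 *   # firmware directory (typically /lib/firmware/amdgpu/)
 *   echo -n usbc_pd_fw.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */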

int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

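	/*
	 * sysfs delivers large binary writes in page-sized chunks, each with
	 * its own offset; every chunk is copied in at 'pos' and the staged
	 * image size grows until the read side consumes the buffer.
	 */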
	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};
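/*
 * Example flash sequence (a sketch; the sysfs path and image name are
 * assumptions and vary per system):
 *
 *   # stage the IFWI image
 *   cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash
 *
 *   # trigger the SPIROM update
 *   cat /sys/class/drm/card0/device/psp_vbflash
 *
 *   # poll psp_vbflash_status (below) until it reports 1
 *   cat /sys/class/drm/card0/device/psp_vbflash_status
 */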

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

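	/*
	 * Report 0 until the backend marks the flash as done; once done,
	 * report 1 only if bit 31 of the PSP status is clear (that bit is
	 * assumed here to indicate an unfinished SPIROM update).
	 */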
	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);

static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};