1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/dma-mapping.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "soc15_common.h"
33 #include "psp_v3_1.h"
34 #include "psp_v10_0.h"
35 #include "psp_v11_0.h"
36 #include "psp_v12_0.h"
37 
38 #include "amdgpu_ras.h"
39 
40 static int psp_sysfs_init(struct amdgpu_device *adev);
41 static void psp_sysfs_fini(struct amdgpu_device *adev);
42 
43 static int psp_load_smu_fw(struct psp_context *psp);
44 
45 /*
46  * Because DF Cstate management is centralized in the PMFW, the
47  * firmware loading sequence is updated as below:
48  *   - Load KDB
49  *   - Load SYS_DRV
50  *   - Load tOS
51  *   - Load PMFW
52  *   - Setup TMR
53  *   - Load other non-psp fw
54  *   - Load ASD
55  *   - Load XGMI/RAS/HDCP/DTM TA if any
56  *
57  * This new sequence is required for
58  *   - Arcturus
59  *   - Navi12 and onwards
60  */
61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
62 {
63 	struct amdgpu_device *adev = psp->adev;
64 
65 	psp->pmfw_centralized_cstate_management = false;
66 
67 	if (amdgpu_sriov_vf(adev))
68 		return;
69 
70 	if (adev->flags & AMD_IS_APU)
71 		return;
72 
73 	if ((adev->asic_type == CHIP_ARCTURUS) ||
74 	    (adev->asic_type >= CHIP_NAVI12))
75 		psp->pmfw_centralized_cstate_management = true;
76 }
77 
78 static int psp_early_init(void *handle)
79 {
80 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
81 	struct psp_context *psp = &adev->psp;
82 
83 	switch (adev->asic_type) {
84 	case CHIP_VEGA10:
85 	case CHIP_VEGA12:
86 		psp_v3_1_set_psp_funcs(psp);
87 		psp->autoload_supported = false;
88 		break;
89 	case CHIP_RAVEN:
90 		psp_v10_0_set_psp_funcs(psp);
91 		psp->autoload_supported = false;
92 		break;
93 	case CHIP_VEGA20:
94 	case CHIP_ARCTURUS:
95 		psp_v11_0_set_psp_funcs(psp);
96 		psp->autoload_supported = false;
97 		break;
98 	case CHIP_NAVI10:
99 	case CHIP_NAVI14:
100 	case CHIP_NAVI12:
101 	case CHIP_SIENNA_CICHLID:
102 	case CHIP_NAVY_FLOUNDER:
103 		psp_v11_0_set_psp_funcs(psp);
104 		psp->autoload_supported = true;
105 		break;
106 	case CHIP_RENOIR:
107 		psp_v12_0_set_psp_funcs(psp);
108 		break;
109 	default:
110 		return -EINVAL;
111 	}
112 
113 	psp->adev = adev;
114 
115 	psp_check_pmfw_centralized_cstate_management(psp);
116 
117 	return 0;
118 }
119 
120 static void psp_memory_training_fini(struct psp_context *psp)
121 {
122 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
123 
124 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
125 	kfree(ctx->sys_cache);
126 	ctx->sys_cache = NULL;
127 }
128 
129 static int psp_memory_training_init(struct psp_context *psp)
130 {
131 	int ret;
132 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
133 
134 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
135 		DRM_DEBUG("memory training is not supported!\n");
136 		return 0;
137 	}
138 
139 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
140 	if (ctx->sys_cache == NULL) {
141 		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
142 		ret = -ENOMEM;
143 		goto Err_out;
144 	}
145 
146 	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
147 		  ctx->train_data_size,
148 		  ctx->p2c_train_data_offset,
149 		  ctx->c2p_train_data_offset);
150 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
151 	return 0;
152 
153 Err_out:
154 	psp_memory_training_fini(psp);
155 	return ret;
156 }
157 
158 static int psp_sw_init(void *handle)
159 {
160 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
161 	struct psp_context *psp = &adev->psp;
162 	int ret;
163 
164 	ret = psp_init_microcode(psp);
165 	if (ret) {
166 		DRM_ERROR("Failed to load psp firmware!\n");
167 		return ret;
168 	}
169 
170 	ret = psp_memory_training_init(psp);
171 	if (ret) {
172 		DRM_ERROR("Failed to initialize memory training!\n");
173 		return ret;
174 	}
175 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
176 	if (ret) {
177 		DRM_ERROR("Failed to process memory training!\n");
178 		return ret;
179 	}
180 
181 	if (adev->asic_type == CHIP_NAVI10) {
182 		ret = psp_sysfs_init(adev);
183 		if (ret) {
184 			return ret;
185 		}
186 	}
187 
188 	return 0;
189 }
190 
191 static int psp_sw_fini(void *handle)
192 {
193 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
194 
195 	psp_memory_training_fini(&adev->psp);
196 	if (adev->psp.sos_fw) {
197 		release_firmware(adev->psp.sos_fw);
198 		adev->psp.sos_fw = NULL;
199 	}
200 	if (adev->psp.asd_fw) {
201 		release_firmware(adev->psp.asd_fw);
202 		adev->psp.asd_fw = NULL;
203 	}
204 	if (adev->psp.ta_fw) {
205 		release_firmware(adev->psp.ta_fw);
206 		adev->psp.ta_fw = NULL;
207 	}
208 
209 	if (adev->asic_type == CHIP_NAVI10)
210 		psp_sysfs_fini(adev);
211 
212 	return 0;
213 }
214 
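/* Poll a PSP register until (value & mask) equals reg_val, or until the
 * value changes away from reg_val when check_changed is set.  Returns 0 on
 * success or -ETIME if adev->usec_timeout microseconds elapse.
 */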
215 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
216 		 uint32_t reg_val, uint32_t mask, bool check_changed)
217 {
218 	uint32_t val;
219 	int i;
220 	struct amdgpu_device *adev = psp->adev;
221 
222 	for (i = 0; i < adev->usec_timeout; i++) {
223 		val = RREG32(reg_index);
224 		if (check_changed) {
225 			if (val != reg_val)
226 				return 0;
227 		} else {
228 			if ((val & mask) == reg_val)
229 				return 0;
230 		}
231 		udelay(1);
232 	}
233 
234 	return -ETIME;
235 }
236 
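/* Copy a GFX command into the PSP command buffer, submit it to the KM ring
 * and poll the fence buffer until the PSP signals completion (or a RAS
 * interrupt forces an early exit).  The response status, session id and
 * TMR address are propagated back to the caller.
 */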
237 static int
238 psp_cmd_submit_buf(struct psp_context *psp,
239 		   struct amdgpu_firmware_info *ucode,
240 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
241 {
242 	int ret;
243 	int index;
244 	int timeout = 2000;
245 	bool ras_intr = false;
246 	bool skip_unsupport = false;
247 
248 	mutex_lock(&psp->mutex);
249 
250 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
251 
252 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
253 
254 	index = atomic_inc_return(&psp->fence_value);
255 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
256 	if (ret) {
257 		atomic_dec(&psp->fence_value);
258 		mutex_unlock(&psp->mutex);
259 		return ret;
260 	}
261 
262 	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
263 	while (*((unsigned int *)psp->fence_buf) != index) {
264 		if (--timeout == 0)
265 			break;
266 		/*
267 		 * Don't wait for the full timeout when err_event_athub occurs,
268 		 * because the gpu reset thread has been triggered and the locked
269 		 * resources need to be released for the psp resume sequence.
270 		 */
271 		ras_intr = amdgpu_ras_intr_triggered();
272 		if (ras_intr)
273 			break;
274 		msleep(1);
275 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
276 	}
277 
278 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
279 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
280 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
281 
282 	/* In some cases, the psp response status is not 0 even though there is
283 	 * no problem while the command is submitted. Some versions of the PSP FW
284 	 * don't write 0 to that field.
285 	 * So here we only print a warning instead of an error during psp
286 	 * initialization to avoid breaking hw_init, and we don't
287 	 * return -EINVAL.
288 	 */
289 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
290 		if (ucode)
291 			DRM_WARN("failed to load ucode id (%d) ",
292 				  ucode->ucode_id);
293 		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
294 			 psp->cmd_buf_mem->cmd_id,
295 			 psp->cmd_buf_mem->resp.status);
296 		if (!timeout) {
297 			mutex_unlock(&psp->mutex);
298 			return -EINVAL;
299 		}
300 	}
301 
302 	/* get xGMI session id from response buffer */
303 	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
304 
305 	if (ucode) {
306 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
307 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
308 	}
309 	mutex_unlock(&psp->mutex);
310 
311 	return ret;
312 }
313 
314 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
315 				 struct psp_gfx_cmd_resp *cmd,
316 				 uint64_t tmr_mc, uint32_t size)
317 {
318 	if (amdgpu_sriov_vf(psp->adev))
319 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
320 	else
321 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
322 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
323 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
324 	cmd->cmd.cmd_setup_tmr.buf_size = size;
325 }
326 
327 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
328 				      uint64_t pri_buf_mc, uint32_t size)
329 {
330 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
331 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
332 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
333 	cmd->cmd.cmd_load_toc.toc_size = size;
334 }
335 
336 /* Issue a LOAD TOC command to the PSP so it parses the TOC and calculates the TMR size needed */
337 static int psp_load_toc(struct psp_context *psp,
338 			uint32_t *tmr_size)
339 {
340 	int ret;
341 	struct psp_gfx_cmd_resp *cmd;
342 
343 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
344 	if (!cmd)
345 		return -ENOMEM;
346 	/* Copy toc to psp firmware private buffer */
347 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
348 	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
349 
350 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
351 
352 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
353 				 psp->fence_buf_mc_addr);
354 	if (!ret)
355 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
356 	kfree(cmd);
357 	return ret;
358 }
359 
360 /* Set up Trusted Memory Region */
361 static int psp_tmr_init(struct psp_context *psp)
362 {
363 	int ret;
364 	uint32_t tmr_size;
365 	void *tmr_buf;
366 	void **pptr;
367 
368 	/*
369 	 * According to the HW engineers, the TMR address should be "naturally
370 	 * aligned", i.e. the start address should be an integer multiple of the
371 	 * TMR size.
372 	 *
373 	 * Note: this memory needs to stay reserved until the driver is unloaded.
374 	 */
375 	tmr_size = PSP_TMR_SIZE;
376 
377 	/* For ASICs that support RLC autoload, the psp will parse the toc
378 	 * and calculate the total TMR size needed */
379 	if (!amdgpu_sriov_vf(psp->adev) &&
380 	    psp->toc_start_addr &&
381 	    psp->toc_bin_size &&
382 	    psp->fw_pri_buf) {
383 		ret = psp_load_toc(psp, &tmr_size);
384 		if (ret) {
385 			DRM_ERROR("Failed to load toc\n");
386 			return ret;
387 		}
388 	}
389 
390 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
391 	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
392 				      AMDGPU_GEM_DOMAIN_VRAM,
393 				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
394 
395 	return ret;
396 }
397 
398 static int psp_clear_vf_fw(struct psp_context *psp)
399 {
400 	int ret;
401 	struct psp_gfx_cmd_resp *cmd;
402 
403 	if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12)
404 		return 0;
405 
406 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
407 	if (!cmd)
408 		return -ENOMEM;
409 
410 	cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW;
411 
412 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
413 	kfree(cmd);
414 
415 	return ret;
416 }
417 
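/* ASICs whose TMR is already set up by the host driver under SRIOV */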
418 static bool psp_skip_tmr(struct psp_context *psp)
419 {
420 	switch (psp->adev->asic_type) {
421 	case CHIP_NAVI12:
422 	case CHIP_SIENNA_CICHLID:
423 		return true;
424 	default:
425 		return false;
426 	}
427 }
428 
429 static int psp_tmr_load(struct psp_context *psp)
430 {
431 	int ret;
432 	struct psp_gfx_cmd_resp *cmd;
433 
434 	/* For Navi12 and Sienna Cichlid SRIOV, do not set up the TMR;
435 	 * it is already set up by the host driver.
436 	 */
437 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
438 		return 0;
439 
440 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
441 	if (!cmd)
442 		return -ENOMEM;
443 
444 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
445 			     amdgpu_bo_size(psp->tmr_bo));
446 	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
447 		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
448 
449 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
450 				 psp->fence_buf_mc_addr);
451 
452 	kfree(cmd);
453 
454 	return ret;
455 }
456 
457 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
458 					struct psp_gfx_cmd_resp *cmd)
459 {
460 	if (amdgpu_sriov_vf(psp->adev))
461 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
462 	else
463 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
464 }
465 
466 static int psp_tmr_unload(struct psp_context *psp)
467 {
468 	int ret;
469 	struct psp_gfx_cmd_resp *cmd;
470 
471 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
472 	if (!cmd)
473 		return -ENOMEM;
474 
475 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
476 	DRM_INFO("free PSP TMR buffer\n");
477 
478 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
479 				 psp->fence_buf_mc_addr);
480 
481 	kfree(cmd);
482 
483 	return ret;
484 }
485 
486 static int psp_tmr_terminate(struct psp_context *psp)
487 {
488 	int ret;
489 	void *tmr_buf;
490 	void **pptr;
491 
492 	ret = psp_tmr_unload(psp);
493 	if (ret)
494 		return ret;
495 
496 	/* free TMR memory buffer */
497 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
498 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
499 
500 	return 0;
501 }
502 
503 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
504 				uint64_t asd_mc, uint32_t size)
505 {
506 	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
507 	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
508 	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
509 	cmd->cmd.cmd_load_ta.app_len = size;
510 
511 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
512 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
513 	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
514 }
515 
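/* Load the ASD firmware into the PSP and record the resulting session id */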
516 static int psp_asd_load(struct psp_context *psp)
517 {
518 	int ret;
519 	struct psp_gfx_cmd_resp *cmd;
520 
521 	/* If the PSP version doesn't match the ASD version, ASD loading will fail.
522 	 * Add a workaround to bypass it for sriov for now.
523 	 * TODO: add a version check to make it common
524 	 */
525 	if (amdgpu_sriov_vf(psp->adev) ||
526 	    (psp->adev->asic_type == CHIP_NAVY_FLOUNDER))
527 		return 0;
528 
529 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
530 	if (!cmd)
531 		return -ENOMEM;
532 
533 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
534 	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
535 
536 	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
537 				  psp->asd_ucode_size);
538 
539 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
540 				 psp->fence_buf_mc_addr);
541 	if (!ret) {
542 		psp->asd_context.asd_initialized = true;
543 		psp->asd_context.session_id = cmd->resp.session_id;
544 	}
545 
546 	kfree(cmd);
547 
548 	return ret;
549 }
550 
551 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
552 				       uint32_t session_id)
553 {
554 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
555 	cmd->cmd.cmd_unload_ta.session_id = session_id;
556 }
557 
558 static int psp_asd_unload(struct psp_context *psp)
559 {
560 	int ret;
561 	struct psp_gfx_cmd_resp *cmd;
562 
563 	if (amdgpu_sriov_vf(psp->adev))
564 		return 0;
565 
566 	if (!psp->asd_context.asd_initialized)
567 		return 0;
568 
569 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
570 	if (!cmd)
571 		return -ENOMEM;
572 
573 	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
574 
575 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
576 				 psp->fence_buf_mc_addr);
577 	if (!ret)
578 		psp->asd_context.asd_initialized = false;
579 
580 	kfree(cmd);
581 
582 	return ret;
583 }
584 
585 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
586 		uint32_t id, uint32_t value)
587 {
588 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
589 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
590 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
591 }
592 
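/* Ask the PSP firmware to program the given register with the given value */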
593 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
594 		uint32_t value)
595 {
596 	struct psp_gfx_cmd_resp *cmd = NULL;
597 	int ret = 0;
598 
599 	if (reg >= PSP_REG_LAST)
600 		return -EINVAL;
601 
602 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
603 	if (!cmd)
604 		return -ENOMEM;
605 
606 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
607 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
608 
609 	kfree(cmd);
610 	return ret;
611 }
612 
613 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
614 				     uint64_t ta_bin_mc,
615 				     uint32_t ta_bin_size,
616 				     uint64_t ta_shared_mc,
617 				     uint32_t ta_shared_size)
618 {
619 	cmd->cmd_id 				= GFX_CMD_ID_LOAD_TA;
620 	cmd->cmd.cmd_load_ta.app_phy_addr_lo 	= lower_32_bits(ta_bin_mc);
621 	cmd->cmd.cmd_load_ta.app_phy_addr_hi 	= upper_32_bits(ta_bin_mc);
622 	cmd->cmd.cmd_load_ta.app_len 		= ta_bin_size;
623 
624 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
625 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
626 	cmd->cmd.cmd_load_ta.cmd_buf_len 	 = ta_shared_size;
627 }
628 
629 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
630 {
631 	int ret;
632 
633 	/*
634 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
635 	 * physical) for xgmi ta <-> Driver
636 	 */
637 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
638 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
639 				      &psp->xgmi_context.xgmi_shared_bo,
640 				      &psp->xgmi_context.xgmi_shared_mc_addr,
641 				      &psp->xgmi_context.xgmi_shared_buf);
642 
643 	return ret;
644 }
645 
646 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
647 				       uint32_t ta_cmd_id,
648 				       uint32_t session_id)
649 {
650 	cmd->cmd_id 				= GFX_CMD_ID_INVOKE_CMD;
651 	cmd->cmd.cmd_invoke_cmd.session_id 	= session_id;
652 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id 	= ta_cmd_id;
653 }
654 
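/* Send an INVOKE_CMD to a previously loaded TA, identified by its session id */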
655 static int psp_ta_invoke(struct psp_context *psp,
656 		  uint32_t ta_cmd_id,
657 		  uint32_t session_id)
658 {
659 	int ret;
660 	struct psp_gfx_cmd_resp *cmd;
661 
662 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
663 	if (!cmd)
664 		return -ENOMEM;
665 
666 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
667 
668 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
669 				 psp->fence_buf_mc_addr);
670 
671 	kfree(cmd);
672 
673 	return ret;
674 }
675 
676 static int psp_xgmi_load(struct psp_context *psp)
677 {
678 	int ret;
679 	struct psp_gfx_cmd_resp *cmd;
680 
681 	/*
682 	 * TODO: bypass the loading in sriov for now
683 	 */
684 
685 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
686 	if (!cmd)
687 		return -ENOMEM;
688 
689 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
690 	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
691 
692 	psp_prep_ta_load_cmd_buf(cmd,
693 				 psp->fw_pri_mc_addr,
694 				 psp->ta_xgmi_ucode_size,
695 				 psp->xgmi_context.xgmi_shared_mc_addr,
696 				 PSP_XGMI_SHARED_MEM_SIZE);
697 
698 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
699 				 psp->fence_buf_mc_addr);
700 
701 	if (!ret) {
702 		psp->xgmi_context.initialized = 1;
703 		psp->xgmi_context.session_id = cmd->resp.session_id;
704 	}
705 
706 	kfree(cmd);
707 
708 	return ret;
709 }
710 
711 static int psp_xgmi_unload(struct psp_context *psp)
712 {
713 	int ret;
714 	struct psp_gfx_cmd_resp *cmd;
715 	struct amdgpu_device *adev = psp->adev;
716 
717 	/* XGMI TA unload currently is not supported on Arcturus */
718 	if (adev->asic_type == CHIP_ARCTURUS)
719 		return 0;
720 
721 	/*
722 	 * TODO: bypass the unloading in sriov for now
723 	 */
724 
725 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
726 	if (!cmd)
727 		return -ENOMEM;
728 
729 	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
730 
731 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
732 				 psp->fence_buf_mc_addr);
733 
734 	kfree(cmd);
735 
736 	return ret;
737 }
738 
739 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
740 {
741 	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
742 }
743 
744 int psp_xgmi_terminate(struct psp_context *psp)
745 {
746 	int ret;
747 
748 	if (!psp->xgmi_context.initialized)
749 		return 0;
750 
751 	ret = psp_xgmi_unload(psp);
752 	if (ret)
753 		return ret;
754 
755 	psp->xgmi_context.initialized = 0;
756 
757 	/* free xgmi shared memory */
758 	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
759 			&psp->xgmi_context.xgmi_shared_mc_addr,
760 			&psp->xgmi_context.xgmi_shared_buf);
761 
762 	return 0;
763 }
764 
765 int psp_xgmi_initialize(struct psp_context *psp)
766 {
767 	struct ta_xgmi_shared_memory *xgmi_cmd;
768 	int ret;
769 
770 	if (!psp->adev->psp.ta_fw ||
771 	    !psp->adev->psp.ta_xgmi_ucode_size ||
772 	    !psp->adev->psp.ta_xgmi_start_addr)
773 		return -ENOENT;
774 
775 	if (!psp->xgmi_context.initialized) {
776 		ret = psp_xgmi_init_shared_buf(psp);
777 		if (ret)
778 			return ret;
779 	}
780 
781 	/* Load XGMI TA */
782 	ret = psp_xgmi_load(psp);
783 	if (ret)
784 		return ret;
785 
786 	/* Initialize XGMI session */
787 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
788 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
789 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
790 
791 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
792 
793 	return ret;
794 }
795 
796 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
797 {
798 	struct ta_xgmi_shared_memory *xgmi_cmd;
799 	int ret;
800 
801 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
802 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
803 
804 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
805 
806 	/* Invoke xgmi ta to get hive id */
807 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
808 	if (ret)
809 		return ret;
810 
811 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
812 
813 	return 0;
814 }
815 
816 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
817 {
818 	struct ta_xgmi_shared_memory *xgmi_cmd;
819 	int ret;
820 
821 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
822 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
823 
824 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
825 
826 	/* Invoke xgmi ta to get the node id */
827 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
828 	if (ret)
829 		return ret;
830 
831 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
832 
833 	return 0;
834 }
835 
836 int psp_xgmi_get_topology_info(struct psp_context *psp,
837 			       int number_devices,
838 			       struct psp_xgmi_topology_info *topology)
839 {
840 	struct ta_xgmi_shared_memory *xgmi_cmd;
841 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
842 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
843 	int i;
844 	int ret;
845 
846 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
847 		return -EINVAL;
848 
849 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
850 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
851 
852 	/* Fill in the shared memory with topology information as input */
853 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
854 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
855 	topology_info_input->num_nodes = number_devices;
856 
857 	for (i = 0; i < topology_info_input->num_nodes; i++) {
858 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
859 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
860 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
861 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
862 	}
863 
864 	/* Invoke xgmi ta to get the topology information */
865 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
866 	if (ret)
867 		return ret;
868 
869 	/* Read the output topology information from the shared memory */
870 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
871 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
872 	for (i = 0; i < topology->num_nodes; i++) {
873 		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
874 		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
875 		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
876 		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
877 	}
878 
879 	return 0;
880 }
881 
882 int psp_xgmi_set_topology_info(struct psp_context *psp,
883 			       int number_devices,
884 			       struct psp_xgmi_topology_info *topology)
885 {
886 	struct ta_xgmi_shared_memory *xgmi_cmd;
887 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
888 	int i;
889 
890 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
891 		return -EINVAL;
892 
893 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
894 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
895 
896 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
897 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
898 	topology_info_input->num_nodes = number_devices;
899 
900 	for (i = 0; i < topology_info_input->num_nodes; i++) {
901 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
902 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
903 		topology_info_input->nodes[i].is_sharing_enabled = 1;
904 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
905 	}
906 
907 	/* Invoke xgmi ta to set topology information */
908 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
909 }
910 
911 // ras begin
912 static int psp_ras_init_shared_buf(struct psp_context *psp)
913 {
914 	int ret;
915 
916 	/*
917 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
918 	 * physical) for ras ta <-> Driver
919 	 */
920 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
921 			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
922 			&psp->ras.ras_shared_bo,
923 			&psp->ras.ras_shared_mc_addr,
924 			&psp->ras.ras_shared_buf);
925 
926 	return ret;
927 }
928 
929 static int psp_ras_load(struct psp_context *psp)
930 {
931 	int ret;
932 	struct psp_gfx_cmd_resp *cmd;
933 
934 	/*
935 	 * TODO: bypass the loading in sriov for now
936 	 */
937 	if (amdgpu_sriov_vf(psp->adev))
938 		return 0;
939 
940 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
941 	if (!cmd)
942 		return -ENOMEM;
943 
944 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
945 	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
946 
947 	psp_prep_ta_load_cmd_buf(cmd,
948 				 psp->fw_pri_mc_addr,
949 				 psp->ta_ras_ucode_size,
950 				 psp->ras.ras_shared_mc_addr,
951 				 PSP_RAS_SHARED_MEM_SIZE);
952 
953 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
954 			psp->fence_buf_mc_addr);
955 
956 	if (!ret) {
957 		psp->ras.ras_initialized = true;
958 		psp->ras.session_id = cmd->resp.session_id;
959 	}
960 
961 	kfree(cmd);
962 
963 	return ret;
964 }
965 
966 static int psp_ras_unload(struct psp_context *psp)
967 {
968 	int ret;
969 	struct psp_gfx_cmd_resp *cmd;
970 
971 	/*
972 	 * TODO: bypass the unloading in sriov for now
973 	 */
974 	if (amdgpu_sriov_vf(psp->adev))
975 		return 0;
976 
977 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
978 	if (!cmd)
979 		return -ENOMEM;
980 
981 	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
982 
983 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
984 			psp->fence_buf_mc_addr);
985 
986 	kfree(cmd);
987 
988 	return ret;
989 }
990 
991 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
992 {
993 	struct ta_ras_shared_memory *ras_cmd;
994 	int ret;
995 
996 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
997 
998 	/*
999 	 * TODO: bypass the loading in sriov for now
1000 	 */
1001 	if (amdgpu_sriov_vf(psp->adev))
1002 		return 0;
1003 
1004 	ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
1005 
1006 	if (amdgpu_ras_intr_triggered())
1007 		return ret;
1008 
1009 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1011 		DRM_WARN("RAS: Unsupported Interface");
1012 		return -EINVAL;
1013 	}
1014 
1015 	if (!ret) {
1016 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1017 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1018 
1019 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1020 		}
1021 		else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1022 			dev_warn(psp->adev->dev,
1023 				 "RAS internal register access blocked\n");
1024 	}
1025 
1026 	return ret;
1027 }
1028 
1029 int psp_ras_enable_features(struct psp_context *psp,
1030 		union ta_ras_cmd_input *info, bool enable)
1031 {
1032 	struct ta_ras_shared_memory *ras_cmd;
1033 	int ret;
1034 
1035 	if (!psp->ras.ras_initialized)
1036 		return -EINVAL;
1037 
1038 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1039 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1040 
1041 	if (enable)
1042 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1043 	else
1044 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1045 
1046 	ras_cmd->ras_in_message = *info;
1047 
1048 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1049 	if (ret)
1050 		return -EINVAL;
1051 
1052 	return ras_cmd->ras_status;
1053 }
1054 
1055 static int psp_ras_terminate(struct psp_context *psp)
1056 {
1057 	int ret;
1058 
1059 	/*
1060 	 * TODO: bypass the terminate in sriov for now
1061 	 */
1062 	if (amdgpu_sriov_vf(psp->adev))
1063 		return 0;
1064 
1065 	if (!psp->ras.ras_initialized)
1066 		return 0;
1067 
1068 	ret = psp_ras_unload(psp);
1069 	if (ret)
1070 		return ret;
1071 
1072 	psp->ras.ras_initialized = false;
1073 
1074 	/* free ras shared memory */
1075 	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
1076 			&psp->ras.ras_shared_mc_addr,
1077 			&psp->ras.ras_shared_buf);
1078 
1079 	return 0;
1080 }
1081 
1082 static int psp_ras_initialize(struct psp_context *psp)
1083 {
1084 	int ret;
1085 
1086 	/*
1087 	 * TODO: bypass the initialize in sriov for now
1088 	 */
1089 	if (amdgpu_sriov_vf(psp->adev))
1090 		return 0;
1091 
1092 	if (!psp->adev->psp.ta_ras_ucode_size ||
1093 	    !psp->adev->psp.ta_ras_start_addr) {
1094 		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
1095 		return 0;
1096 	}
1097 
1098 	if (!psp->ras.ras_initialized) {
1099 		ret = psp_ras_init_shared_buf(psp);
1100 		if (ret)
1101 			return ret;
1102 	}
1103 
1104 	ret = psp_ras_load(psp);
1105 	if (ret)
1106 		return ret;
1107 
1108 	return 0;
1109 }
1110 
1111 int psp_ras_trigger_error(struct psp_context *psp,
1112 			  struct ta_ras_trigger_error_input *info)
1113 {
1114 	struct ta_ras_shared_memory *ras_cmd;
1115 	int ret;
1116 
1117 	if (!psp->ras.ras_initialized)
1118 		return -EINVAL;
1119 
1120 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1121 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1122 
1123 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1124 	ras_cmd->ras_in_message.trigger_error = *info;
1125 
1126 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1127 	if (ret)
1128 		return -EINVAL;
1129 
1130 	/* If err_event_athub occurs, the error injection was successful,
1131 	 * however the return status from the TA is no longer reliable */
1132 	if (amdgpu_ras_intr_triggered())
1133 		return 0;
1134 
1135 	return ras_cmd->ras_status;
1136 }
1137 // ras end
1138 
1139 // HDCP start
1140 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
1141 {
1142 	int ret;
1143 
1144 	/*
1145 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1146 	 * physical) for hdcp ta <-> Driver
1147 	 */
1148 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
1149 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1150 				      &psp->hdcp_context.hdcp_shared_bo,
1151 				      &psp->hdcp_context.hdcp_shared_mc_addr,
1152 				      &psp->hdcp_context.hdcp_shared_buf);
1153 
1154 	return ret;
1155 }
1156 
1157 static int psp_hdcp_load(struct psp_context *psp)
1158 {
1159 	int ret;
1160 	struct psp_gfx_cmd_resp *cmd;
1161 
1162 	/*
1163 	 * TODO: bypass the loading in sriov for now
1164 	 */
1165 	if (amdgpu_sriov_vf(psp->adev))
1166 		return 0;
1167 
1168 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1169 	if (!cmd)
1170 		return -ENOMEM;
1171 
1172 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1173 	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
1174 	       psp->ta_hdcp_ucode_size);
1175 
1176 	psp_prep_ta_load_cmd_buf(cmd,
1177 				 psp->fw_pri_mc_addr,
1178 				 psp->ta_hdcp_ucode_size,
1179 				 psp->hdcp_context.hdcp_shared_mc_addr,
1180 				 PSP_HDCP_SHARED_MEM_SIZE);
1181 
1182 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1183 
1184 	if (!ret) {
1185 		psp->hdcp_context.hdcp_initialized = true;
1186 		psp->hdcp_context.session_id = cmd->resp.session_id;
1187 		mutex_init(&psp->hdcp_context.mutex);
1188 	}
1189 
1190 	kfree(cmd);
1191 
1192 	return ret;
1193 }

1194 static int psp_hdcp_initialize(struct psp_context *psp)
1195 {
1196 	int ret;
1197 
1198 	/*
1199 	 * TODO: bypass the initialize in sriov for now
1200 	 */
1201 	if (amdgpu_sriov_vf(psp->adev))
1202 		return 0;
1203 
1204 	if (!psp->adev->psp.ta_hdcp_ucode_size ||
1205 	    !psp->adev->psp.ta_hdcp_start_addr) {
1206 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1207 		return 0;
1208 	}
1209 
1210 	if (!psp->hdcp_context.hdcp_initialized) {
1211 		ret = psp_hdcp_init_shared_buf(psp);
1212 		if (ret)
1213 			return ret;
1214 	}
1215 
1216 	ret = psp_hdcp_load(psp);
1217 	if (ret)
1218 		return ret;
1219 
1220 	return 0;
1221 }
1222 
1223 static int psp_hdcp_unload(struct psp_context *psp)
1224 {
1225 	int ret;
1226 	struct psp_gfx_cmd_resp *cmd;
1227 
1228 	/*
1229 	 * TODO: bypass the unloading in sriov for now
1230 	 */
1231 	if (amdgpu_sriov_vf(psp->adev))
1232 		return 0;
1233 
1234 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1235 	if (!cmd)
1236 		return -ENOMEM;
1237 
1238 	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
1239 
1240 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1241 
1242 	kfree(cmd);
1243 
1244 	return ret;
1245 }
1246 
1247 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1248 {
1249 	/*
1250 	 * TODO: bypass the loading in sriov for now
1251 	 */
1252 	if (amdgpu_sriov_vf(psp->adev))
1253 		return 0;
1254 
1255 	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
1256 }
1257 
1258 static int psp_hdcp_terminate(struct psp_context *psp)
1259 {
1260 	int ret;
1261 
1262 	/*
1263 	 * TODO: bypass the terminate in sriov for now
1264 	 */
1265 	if (amdgpu_sriov_vf(psp->adev))
1266 		return 0;
1267 
1268 	if (!psp->hdcp_context.hdcp_initialized)
1269 		return 0;
1270 
1271 	ret = psp_hdcp_unload(psp);
1272 	if (ret)
1273 		return ret;
1274 
1275 	psp->hdcp_context.hdcp_initialized = false;
1276 
1277 	/* free hdcp shared memory */
1278 	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
1279 			      &psp->hdcp_context.hdcp_shared_mc_addr,
1280 			      &psp->hdcp_context.hdcp_shared_buf);
1281 
1282 	return 0;
1283 }
1284 // HDCP end
1285 
1286 // DTM start
1287 static int psp_dtm_init_shared_buf(struct psp_context *psp)
1288 {
1289 	int ret;
1290 
1291 	/*
1292 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1293 	 * physical) for dtm ta <-> Driver
1294 	 */
1295 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
1296 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1297 				      &psp->dtm_context.dtm_shared_bo,
1298 				      &psp->dtm_context.dtm_shared_mc_addr,
1299 				      &psp->dtm_context.dtm_shared_buf);
1300 
1301 	return ret;
1302 }
1303 
1304 static int psp_dtm_load(struct psp_context *psp)
1305 {
1306 	int ret;
1307 	struct psp_gfx_cmd_resp *cmd;
1308 
1309 	/*
1310 	 * TODO: bypass the loading in sriov for now
1311 	 */
1312 	if (amdgpu_sriov_vf(psp->adev))
1313 		return 0;
1314 
1315 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1316 	if (!cmd)
1317 		return -ENOMEM;
1318 
1319 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1320 	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
1321 
1322 	psp_prep_ta_load_cmd_buf(cmd,
1323 				 psp->fw_pri_mc_addr,
1324 				 psp->ta_dtm_ucode_size,
1325 				 psp->dtm_context.dtm_shared_mc_addr,
1326 				 PSP_DTM_SHARED_MEM_SIZE);
1327 
1328 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1329 
1330 	if (!ret) {
1331 		psp->dtm_context.dtm_initialized = true;
1332 		psp->dtm_context.session_id = cmd->resp.session_id;
1333 		mutex_init(&psp->dtm_context.mutex);
1334 	}
1335 
1336 	kfree(cmd);
1337 
1338 	return ret;
1339 }
1340 
1341 static int psp_dtm_initialize(struct psp_context *psp)
1342 {
1343 	int ret;
1344 
1345 	/*
1346 	 * TODO: bypass the initialize in sriov for now
1347 	 */
1348 	if (amdgpu_sriov_vf(psp->adev))
1349 		return 0;
1350 
1351 	if (!psp->adev->psp.ta_dtm_ucode_size ||
1352 	    !psp->adev->psp.ta_dtm_start_addr) {
1353 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1354 		return 0;
1355 	}
1356 
1357 	if (!psp->dtm_context.dtm_initialized) {
1358 		ret = psp_dtm_init_shared_buf(psp);
1359 		if (ret)
1360 			return ret;
1361 	}
1362 
1363 	ret = psp_dtm_load(psp);
1364 	if (ret)
1365 		return ret;
1366 
1367 	return 0;
1368 }
1369 
1370 static int psp_dtm_unload(struct psp_context *psp)
1371 {
1372 	int ret;
1373 	struct psp_gfx_cmd_resp *cmd;
1374 
1375 	/*
1376 	 * TODO: bypass the unloading in sriov for now
1377 	 */
1378 	if (amdgpu_sriov_vf(psp->adev))
1379 		return 0;
1380 
1381 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1382 	if (!cmd)
1383 		return -ENOMEM;
1384 
1385 	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
1386 
1387 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1388 
1389 	kfree(cmd);
1390 
1391 	return ret;
1392 }
1393 
1394 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1395 {
1396 	/*
1397 	 * TODO: bypass the loading in sriov for now
1398 	 */
1399 	if (amdgpu_sriov_vf(psp->adev))
1400 		return 0;
1401 
1402 	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
1403 }
1404 
1405 static int psp_dtm_terminate(struct psp_context *psp)
1406 {
1407 	int ret;
1408 
1409 	/*
1410 	 * TODO: bypass the terminate in sriov for now
1411 	 */
1412 	if (amdgpu_sriov_vf(psp->adev))
1413 		return 0;
1414 
1415 	if (!psp->dtm_context.dtm_initialized)
1416 		return 0;
1417 
1418 	ret = psp_dtm_unload(psp);
1419 	if (ret)
1420 		return ret;
1421 
1422 	psp->dtm_context.dtm_initialized = false;
1423 
1424 	/* free dtm shared memory */
1425 	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
1426 			      &psp->dtm_context.dtm_shared_mc_addr,
1427 			      &psp->dtm_context.dtm_shared_buf);
1428 
1429 	return 0;
1430 }
1431 // DTM end
1432 
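/* Bring up the PSP: on bare metal, run the bootloader stages (KDB, SPL,
 * SYS_DRV, SOS); then create the KM ring, clear stale VF firmware and set up
 * the TMR, loading the SMU firmware first when DF Cstate management is
 * centralized in the PMFW.
 */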
1433 static int psp_hw_start(struct psp_context *psp)
1434 {
1435 	struct amdgpu_device *adev = psp->adev;
1436 	int ret;
1437 
1438 	if (!amdgpu_sriov_vf(adev)) {
1439 		if (psp->kdb_bin_size &&
1440 		    (psp->funcs->bootloader_load_kdb != NULL)) {
1441 			ret = psp_bootloader_load_kdb(psp);
1442 			if (ret) {
1443 				DRM_ERROR("PSP load kdb failed!\n");
1444 				return ret;
1445 			}
1446 		}
1447 
1448 		if (psp->spl_bin_size) {
1449 			ret = psp_bootloader_load_spl(psp);
1450 			if (ret) {
1451 				DRM_ERROR("PSP load spl failed!\n");
1452 				return ret;
1453 			}
1454 		}
1455 
1456 		ret = psp_bootloader_load_sysdrv(psp);
1457 		if (ret) {
1458 			DRM_ERROR("PSP load sysdrv failed!\n");
1459 			return ret;
1460 		}
1461 
1462 		ret = psp_bootloader_load_sos(psp);
1463 		if (ret) {
1464 			DRM_ERROR("PSP load sos failed!\n");
1465 			return ret;
1466 		}
1467 	}
1468 
1469 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
1470 	if (ret) {
1471 		DRM_ERROR("PSP create ring failed!\n");
1472 		return ret;
1473 	}
1474 
1475 	ret = psp_clear_vf_fw(psp);
1476 	if (ret) {
1477 		DRM_ERROR("PSP clear vf fw failed!\n");
1478 		return ret;
1479 	}
1480 
1481 	ret = psp_tmr_init(psp);
1482 	if (ret) {
1483 		DRM_ERROR("PSP tmr init failed!\n");
1484 		return ret;
1485 	}
1486 
1487 	/*
1488 	 * For ASICs with DF Cstate management centralized
1489 	 * in the PMFW, TMR setup should be performed after the PMFW is
1490 	 * loaded and before other non-psp firmware is loaded.
1491 	 */
1492 	if (psp->pmfw_centralized_cstate_management) {
1493 		ret = psp_load_smu_fw(psp);
1494 		if (ret)
1495 			return ret;
1496 	}
1497 
1498 	ret = psp_tmr_load(psp);
1499 	if (ret) {
1500 		DRM_ERROR("PSP load tmr failed!\n");
1501 		return ret;
1502 	}
1503 
1504 	return 0;
1505 }
1506 
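/* Translate an amdgpu ucode id into the PSP GFX firmware type used by the
 * LOAD_IP_FW command.
 */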
1507 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1508 			   enum psp_gfx_fw_type *type)
1509 {
1510 	switch (ucode->ucode_id) {
1511 	case AMDGPU_UCODE_ID_SDMA0:
1512 		*type = GFX_FW_TYPE_SDMA0;
1513 		break;
1514 	case AMDGPU_UCODE_ID_SDMA1:
1515 		*type = GFX_FW_TYPE_SDMA1;
1516 		break;
1517 	case AMDGPU_UCODE_ID_SDMA2:
1518 		*type = GFX_FW_TYPE_SDMA2;
1519 		break;
1520 	case AMDGPU_UCODE_ID_SDMA3:
1521 		*type = GFX_FW_TYPE_SDMA3;
1522 		break;
1523 	case AMDGPU_UCODE_ID_SDMA4:
1524 		*type = GFX_FW_TYPE_SDMA4;
1525 		break;
1526 	case AMDGPU_UCODE_ID_SDMA5:
1527 		*type = GFX_FW_TYPE_SDMA5;
1528 		break;
1529 	case AMDGPU_UCODE_ID_SDMA6:
1530 		*type = GFX_FW_TYPE_SDMA6;
1531 		break;
1532 	case AMDGPU_UCODE_ID_SDMA7:
1533 		*type = GFX_FW_TYPE_SDMA7;
1534 		break;
1535 	case AMDGPU_UCODE_ID_CP_MES:
1536 		*type = GFX_FW_TYPE_CP_MES;
1537 		break;
1538 	case AMDGPU_UCODE_ID_CP_MES_DATA:
1539 		*type = GFX_FW_TYPE_MES_STACK;
1540 		break;
1541 	case AMDGPU_UCODE_ID_CP_CE:
1542 		*type = GFX_FW_TYPE_CP_CE;
1543 		break;
1544 	case AMDGPU_UCODE_ID_CP_PFP:
1545 		*type = GFX_FW_TYPE_CP_PFP;
1546 		break;
1547 	case AMDGPU_UCODE_ID_CP_ME:
1548 		*type = GFX_FW_TYPE_CP_ME;
1549 		break;
1550 	case AMDGPU_UCODE_ID_CP_MEC1:
1551 		*type = GFX_FW_TYPE_CP_MEC;
1552 		break;
1553 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1554 		*type = GFX_FW_TYPE_CP_MEC_ME1;
1555 		break;
1556 	case AMDGPU_UCODE_ID_CP_MEC2:
1557 		*type = GFX_FW_TYPE_CP_MEC;
1558 		break;
1559 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1560 		*type = GFX_FW_TYPE_CP_MEC_ME2;
1561 		break;
1562 	case AMDGPU_UCODE_ID_RLC_G:
1563 		*type = GFX_FW_TYPE_RLC_G;
1564 		break;
1565 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
1566 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
1567 		break;
1568 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
1569 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
1570 		break;
1571 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1572 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1573 		break;
1574 	case AMDGPU_UCODE_ID_SMC:
1575 		*type = GFX_FW_TYPE_SMU;
1576 		break;
1577 	case AMDGPU_UCODE_ID_UVD:
1578 		*type = GFX_FW_TYPE_UVD;
1579 		break;
1580 	case AMDGPU_UCODE_ID_UVD1:
1581 		*type = GFX_FW_TYPE_UVD1;
1582 		break;
1583 	case AMDGPU_UCODE_ID_VCE:
1584 		*type = GFX_FW_TYPE_VCE;
1585 		break;
1586 	case AMDGPU_UCODE_ID_VCN:
1587 		*type = GFX_FW_TYPE_VCN;
1588 		break;
1589 	case AMDGPU_UCODE_ID_VCN1:
1590 		*type = GFX_FW_TYPE_VCN1;
1591 		break;
1592 	case AMDGPU_UCODE_ID_DMCU_ERAM:
1593 		*type = GFX_FW_TYPE_DMCU_ERAM;
1594 		break;
1595 	case AMDGPU_UCODE_ID_DMCU_INTV:
1596 		*type = GFX_FW_TYPE_DMCU_ISR;
1597 		break;
1598 	case AMDGPU_UCODE_ID_VCN0_RAM:
1599 		*type = GFX_FW_TYPE_VCN0_RAM;
1600 		break;
1601 	case AMDGPU_UCODE_ID_VCN1_RAM:
1602 		*type = GFX_FW_TYPE_VCN1_RAM;
1603 		break;
1604 	case AMDGPU_UCODE_ID_DMCUB:
1605 		*type = GFX_FW_TYPE_DMUB;
1606 		break;
1607 	case AMDGPU_UCODE_ID_MAXIMUM:
1608 	default:
1609 		return -EINVAL;
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 static void psp_print_fw_hdr(struct psp_context *psp,
1616 			     struct amdgpu_firmware_info *ucode)
1617 {
1618 	struct amdgpu_device *adev = psp->adev;
1619 	struct common_firmware_header *hdr;
1620 
1621 	switch (ucode->ucode_id) {
1622 	case AMDGPU_UCODE_ID_SDMA0:
1623 	case AMDGPU_UCODE_ID_SDMA1:
1624 	case AMDGPU_UCODE_ID_SDMA2:
1625 	case AMDGPU_UCODE_ID_SDMA3:
1626 	case AMDGPU_UCODE_ID_SDMA4:
1627 	case AMDGPU_UCODE_ID_SDMA5:
1628 	case AMDGPU_UCODE_ID_SDMA6:
1629 	case AMDGPU_UCODE_ID_SDMA7:
1630 		hdr = (struct common_firmware_header *)
1631 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
1632 		amdgpu_ucode_print_sdma_hdr(hdr);
1633 		break;
1634 	case AMDGPU_UCODE_ID_CP_CE:
1635 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
1636 		amdgpu_ucode_print_gfx_hdr(hdr);
1637 		break;
1638 	case AMDGPU_UCODE_ID_CP_PFP:
1639 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
1640 		amdgpu_ucode_print_gfx_hdr(hdr);
1641 		break;
1642 	case AMDGPU_UCODE_ID_CP_ME:
1643 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
1644 		amdgpu_ucode_print_gfx_hdr(hdr);
1645 		break;
1646 	case AMDGPU_UCODE_ID_CP_MEC1:
1647 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
1648 		amdgpu_ucode_print_gfx_hdr(hdr);
1649 		break;
1650 	case AMDGPU_UCODE_ID_RLC_G:
1651 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
1652 		amdgpu_ucode_print_rlc_hdr(hdr);
1653 		break;
1654 	case AMDGPU_UCODE_ID_SMC:
1655 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
1656 		amdgpu_ucode_print_smc_hdr(hdr);
1657 		break;
1658 	default:
1659 		break;
1660 	}
1661 }
1662 
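/* Build a LOAD_IP_FW command that points the PSP at the ucode's GPU address */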
1663 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
1664 				       struct psp_gfx_cmd_resp *cmd)
1665 {
1666 	int ret;
1667 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
1668 
1669 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
1670 
1671 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1672 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
1673 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
1674 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
1675 
1676 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
1677 	if (ret)
1678 		DRM_ERROR("Unknown firmware type\n");
1679 
1680 	return ret;
1681 }
1682 
1683 static int psp_execute_np_fw_load(struct psp_context *psp,
1684 			          struct amdgpu_firmware_info *ucode)
1685 {
1686 	int ret = 0;
1687 
1688 	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
1689 	if (ret)
1690 		return ret;
1691 
1692 	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
1693 				 psp->fence_buf_mc_addr);
1694 
1695 	return ret;
1696 }
1697 
1698 static int psp_load_smu_fw(struct psp_context *psp)
1699 {
1700 	int ret;
1701 	struct amdgpu_device *adev = psp->adev;
1702 	struct amdgpu_firmware_info *ucode =
1703 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
1704 	struct amdgpu_ras *ras = psp->ras.ras;
1705 
1706 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
1707 		return 0;
1708 
1710 	if (adev->in_gpu_reset && ras && ras->supported) {
1711 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
1712 		if (ret)
1713 			DRM_WARN("Failed to set MP1 state to prepare for reload\n");
1715 	}
1716 
1717 	ret = psp_execute_np_fw_load(psp, ucode);
1718 
1719 	if (ret)
1720 		DRM_ERROR("PSP load smu failed!\n");
1721 
1722 	return ret;
1723 }
1724 
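/* Return true if this ucode should not be loaded through the PSP, e.g. the
 * SMC firmware when it is handled elsewhere, firmware restricted under SRIOV,
 * or the MEC JT images when autoload is enabled.
 */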
1725 static bool fw_load_skip_check(struct psp_context *psp,
1726 			       struct amdgpu_firmware_info *ucode)
1727 {
1728 	if (!ucode->fw)
1729 		return true;
1730 
1731 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1732 	    (psp_smu_reload_quirk(psp) ||
1733 	     psp->autoload_supported ||
1734 	     psp->pmfw_centralized_cstate_management))
1735 		return true;
1736 
1737 	if (amdgpu_sriov_vf(psp->adev) &&
1738 	   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
1739 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
1740 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
1741 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
1742 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
1743 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
1744 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
1745 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
1746 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
1747 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1748 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1749 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1750 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
1751 		/* skip ucode loading in SRIOV VF */
1752 		return true;
1753 
1754 	if (psp->autoload_supported &&
1755 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
1756 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
1757 		/* skip mec JT when autoload is enabled */
1758 		return true;
1759 
1760 	return false;
1761 }
1762 
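/* Load all non-PSP firmware images through the PSP, skipping those covered
 * by fw_load_skip_check(), and kick off RLC autoload once the last GFX
 * firmware has been delivered.
 */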
1763 static int psp_np_fw_load(struct psp_context *psp)
1764 {
1765 	int i, ret;
1766 	struct amdgpu_firmware_info *ucode;
1767 	struct amdgpu_device *adev = psp->adev;
1768 
1769 	if (psp->autoload_supported &&
1770 	    !psp->pmfw_centralized_cstate_management) {
1771 		ret = psp_load_smu_fw(psp);
1772 		if (ret)
1773 			return ret;
1774 	}
1775 
1776 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
1777 		ucode = &adev->firmware.ucode[i];
1778 
1779 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1780 		    !fw_load_skip_check(psp, ucode)) {
1781 			ret = psp_load_smu_fw(psp);
1782 			if (ret)
1783 				return ret;
1784 			continue;
1785 		}
1786 
1787 		if (fw_load_skip_check(psp, ucode))
1788 			continue;
1789 
1790 		if (psp->autoload_supported &&
1791 		    (adev->asic_type == CHIP_SIENNA_CICHLID ||
1792 		     adev->asic_type == CHIP_NAVY_FLOUNDER) &&
1793 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
1794 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
1795 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
1796 			/* The PSP only receives one SDMA firmware for sienna_cichlid,
1797 			 * as all four SDMA firmwares are the same */
1798 			continue;
1799 
1800 		psp_print_fw_hdr(psp, ucode);
1801 
1802 		ret = psp_execute_np_fw_load(psp, ucode);
1803 		if (ret)
1804 			return ret;
1805 
1806 		/* Start RLC autoload after the PSP has received all the GFX firmware */
1807 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
1808 		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
1809 			ret = psp_rlc_autoload_start(psp);
1810 			if (ret) {
1811 				DRM_ERROR("Failed to start rlc autoload\n");
1812 				return ret;
1813 			}
1814 		}
1815 	}
1816 
1817 	return 0;
1818 }
1819 
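/* Allocate the PSP private, fence and command buffers (skipped on SRIOV
 * reset), start the PSP, then load the non-PSP firmware, the ASD and the
 * optional TAs (RAS/HDCP/DTM).
 */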
1820 static int psp_load_fw(struct amdgpu_device *adev)
1821 {
1822 	int ret;
1823 	struct psp_context *psp = &adev->psp;
1824 
1825 	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
1826 		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
1827 		goto skip_memalloc;
1828 	}
1829 
1830 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1831 	if (!psp->cmd)
1832 		return -ENOMEM;
1833 
1834 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
1835 					AMDGPU_GEM_DOMAIN_GTT,
1836 					&psp->fw_pri_bo,
1837 					&psp->fw_pri_mc_addr,
1838 					&psp->fw_pri_buf);
1839 	if (ret)
1840 		goto failed;
1841 
1842 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
1843 					AMDGPU_GEM_DOMAIN_VRAM,
1844 					&psp->fence_buf_bo,
1845 					&psp->fence_buf_mc_addr,
1846 					&psp->fence_buf);
1847 	if (ret)
1848 		goto failed;
1849 
1850 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
1851 				      AMDGPU_GEM_DOMAIN_VRAM,
1852 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1853 				      (void **)&psp->cmd_buf_mem);
1854 	if (ret)
1855 		goto failed;
1856 
1857 	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
1858 
1859 	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
1860 	if (ret) {
1861 		DRM_ERROR("PSP ring init failed!\n");
1862 		goto failed;
1863 	}
1864 
1865 skip_memalloc:
1866 	ret = psp_hw_start(psp);
1867 	if (ret)
1868 		goto failed;
1869 
1870 	ret = psp_np_fw_load(psp);
1871 	if (ret)
1872 		goto failed;
1873 
1874 	ret = psp_asd_load(psp);
1875 	if (ret) {
1876 		DRM_ERROR("PSP load asd failed!\n");
1877 		return ret;
1878 	}
1879 
1880 	if (psp->adev->psp.ta_fw) {
1881 		ret = psp_ras_initialize(psp);
1882 		if (ret)
1883 			dev_err(psp->adev->dev,
1884 					"RAS: Failed to initialize RAS\n");
1885 
1886 		ret = psp_hdcp_initialize(psp);
1887 		if (ret)
1888 			dev_err(psp->adev->dev,
1889 				"HDCP: Failed to initialize HDCP\n");
1890 
1891 		ret = psp_dtm_initialize(psp);
1892 		if (ret)
1893 			dev_err(psp->adev->dev,
1894 				"DTM: Failed to initialize DTM\n");
1895 	}
1896 
1897 	return 0;
1898 
1899 failed:
1900 	/*
1901 	 * all cleanup jobs (xgmi terminate, ras terminate,
1902 	 * ring destroy, cmd/fence/fw buffer destroy,
1903 	 * psp->cmd destroy) are delayed to psp_hw_fini
1904 	 */
1905 	return ret;
1906 }
1907 
1908 static int psp_hw_init(void *handle)
1909 {
1910 	int ret;
1911 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1912 
1913 	mutex_lock(&adev->firmware.mutex);
1914 	/*
1915 	 * This sequence is only used once during hw_init; it is not
1916 	 * needed on resume.
1917 	 */
1918 	ret = amdgpu_ucode_init_bo(adev);
1919 	if (ret)
1920 		goto failed;
1921 
1922 	ret = psp_load_fw(adev);
1923 	if (ret) {
1924 		DRM_ERROR("PSP firmware loading failed\n");
1925 		goto failed;
1926 	}
1927 
1928 	mutex_unlock(&adev->firmware.mutex);
1929 	return 0;
1930 
1931 failed:
1932 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1933 	mutex_unlock(&adev->firmware.mutex);
1934 	return -EINVAL;
1935 }
1936 
1937 static int psp_hw_fini(void *handle)
1938 {
1939 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1940 	struct psp_context *psp = &adev->psp;
1941 	int ret;
1942 
1943 	if (psp->adev->psp.ta_fw) {
1944 		psp_ras_terminate(psp);
1945 		psp_dtm_terminate(psp);
1946 		psp_hdcp_terminate(psp);
1947 	}
1948 
1949 	psp_asd_unload(psp);
1950 	ret = psp_clear_vf_fw(psp);
1951 	if (ret) {
1952 		DRM_ERROR("PSP clear vf fw failed!\n");
1953 		return ret;
1954 	}
1955 
1956 	psp_tmr_terminate(psp);
1957 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
1958 
1959 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
1960 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
1961 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
1962 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
1963 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1964 			      (void **)&psp->cmd_buf_mem);
1965 
1966 	kfree(psp->cmd);
1967 	psp->cmd = NULL;
1968 
1969 	return 0;
1970 }
1971 
1972 static int psp_suspend(void *handle)
1973 {
1974 	int ret;
1975 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1976 	struct psp_context *psp = &adev->psp;
1977 
1978 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1979 	    psp->xgmi_context.initialized == 1) {
1980 		ret = psp_xgmi_terminate(psp);
1981 		if (ret) {
1982 			DRM_ERROR("Failed to terminate xgmi ta\n");
1983 			return ret;
1984 		}
1985 	}
1986 
1987 	if (psp->adev->psp.ta_fw) {
1988 		ret = psp_ras_terminate(psp);
1989 		if (ret) {
1990 			DRM_ERROR("Failed to terminate ras ta\n");
1991 			return ret;
1992 		}
1993 		ret = psp_hdcp_terminate(psp);
1994 		if (ret) {
1995 			DRM_ERROR("Failed to terminate hdcp ta\n");
1996 			return ret;
1997 		}
1998 		ret = psp_dtm_terminate(psp);
1999 		if (ret) {
2000 			DRM_ERROR("Failed to terminate dtm ta\n");
2001 			return ret;
2002 		}
2003 	}
2004 
2005 	ret = psp_asd_unload(psp);
2006 	if (ret) {
2007 		DRM_ERROR("Failed to unload asd\n");
2008 		return ret;
2009 	}
2010 
2011 	ret = psp_tmr_terminate(psp);
2012 	if (ret) {
2013 		DRM_ERROR("Failed to terminate tmr\n");
2014 		return ret;
2015 	}
2016 
2017 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2018 	if (ret) {
2019 		DRM_ERROR("PSP ring stop failed\n");
2020 		return ret;
2021 	}
2022 
2023 	return 0;
2024 }
2025 
2026 static int psp_resume(void *handle)
2027 {
2028 	int ret;
2029 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2030 	struct psp_context *psp = &adev->psp;
2031 
2032 	DRM_INFO("PSP is resuming...\n");
2033 
2034 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2035 	if (ret) {
2036 		DRM_ERROR("Failed to process memory training!\n");
2037 		return ret;
2038 	}
2039 
2040 	mutex_lock(&adev->firmware.mutex);
2041 
2042 	ret = psp_hw_start(psp);
2043 	if (ret)
2044 		goto failed;
2045 
2046 	ret = psp_np_fw_load(psp);
2047 	if (ret)
2048 		goto failed;
2049 
2050 	ret = psp_asd_load(psp);
2051 	if (ret) {
2052 		DRM_ERROR("PSP load asd failed!\n");
2053 		goto failed;
2054 	}
2055 
2056 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2057 		ret = psp_xgmi_initialize(psp);
2058 		/* Warn about XGMI session initialization failure
2059 		 * instead of stopping driver initialization
2060 		 */
2061 		if (ret)
2062 			dev_err(psp->adev->dev,
2063 				"XGMI: Failed to initialize XGMI session\n");
2064 	}
2065 
2066 	if (psp->adev->psp.ta_fw) {
2067 		ret = psp_ras_initialize(psp);
2068 		if (ret)
2069 			dev_err(psp->adev->dev,
2070 				"RAS: Failed to initialize RAS\n");
2071 
2072 		ret = psp_hdcp_initialize(psp);
2073 		if (ret)
2074 			dev_err(psp->adev->dev,
2075 				"HDCP: Failed to initialize HDCP\n");
2076 
2077 		ret = psp_dtm_initialize(psp);
2078 		if (ret)
2079 			dev_err(psp->adev->dev,
2080 				"DTM: Failed to initialize DTM\n");
2081 	}
2082 
2083 	mutex_unlock(&adev->firmware.mutex);
2084 
2085 	return 0;
2086 
2087 failed:
2088 	DRM_ERROR("PSP resume failed\n");
2089 	mutex_unlock(&adev->firmware.mutex);
2090 	return ret;
2091 }
2092 
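/*
 * psp_gpu_reset - trigger a PSP mode1 reset
 *
 * Only meaningful when firmware is loaded through the PSP; otherwise the
 * call is a no-op. The reset request is serialized against other PSP
 * commands via psp->mutex.
 */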
2093 int psp_gpu_reset(struct amdgpu_device *adev)
2094 {
2095 	int ret;
2096 
2097 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2098 		return 0;
2099 
2100 	mutex_lock(&adev->psp.mutex);
2101 	ret = psp_mode1_reset(&adev->psp);
2102 	mutex_unlock(&adev->psp.mutex);
2103 
2104 	return ret;
2105 }
2106 
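/*
 * psp_rlc_autoload_start - kick off RLC firmware autoload
 *
 * Submits a GFX_CMD_ID_AUTOLOAD_RLC command on the PSP ring so the RLC
 * picks up the firmware images the PSP has already loaded.
 */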
2107 int psp_rlc_autoload_start(struct psp_context *psp)
2108 {
2109 	int ret;
2110 	struct psp_gfx_cmd_resp *cmd;
2111 
2112 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
2113 	if (!cmd)
2114 		return -ENOMEM;
2115 
2116 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2117 
2118 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
2119 				 psp->fence_buf_mc_addr);
2120 	kfree(cmd);
2121 	return ret;
2122 }
2123 
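/*
 * psp_update_vcn_sram - load a VCN RAM command buffer through the PSP
 * @adev: amdgpu device
 * @inst_idx: VCN instance, selects the VCN0_RAM or VCN1_RAM ucode id
 * @cmd_gpu_addr: GPU address of the command buffer
 * @cmd_size: size of the command buffer in bytes
 *
 * Wraps the buffer in a temporary amdgpu_firmware_info and reuses the
 * regular non-PSP firmware load path to hand it to the PSP.
 */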
2124 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
2125 			uint64_t cmd_gpu_addr, int cmd_size)
2126 {
2127 	struct amdgpu_firmware_info ucode = {0};
2128 
2129 	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
2130 		AMDGPU_UCODE_ID_VCN0_RAM;
2131 	ucode.mc_addr = cmd_gpu_addr;
2132 	ucode.ucode_size = cmd_size;
2133 
2134 	return psp_execute_np_fw_load(&adev->psp, &ucode);
2135 }
2136 
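/*
 * psp_ring_cmd_submit - write one GPCOM frame into the KM ring
 *
 * The hardware write pointer counts DWORDs while write_frame advances in
 * whole rb_frame units, so the target slot is wptr / rb_frame_size_dw;
 * a wptr that is a multiple of the ring size wraps back to the first
 * frame. Once the frame is filled in, HDP is flushed and the write
 * pointer is advanced by one frame, modulo the ring size.
 */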
2137 int psp_ring_cmd_submit(struct psp_context *psp,
2138 			uint64_t cmd_buf_mc_addr,
2139 			uint64_t fence_mc_addr,
2140 			int index)
2141 {
2142 	unsigned int psp_write_ptr_reg = 0;
2143 	struct psp_gfx_rb_frame *write_frame;
2144 	struct psp_ring *ring = &psp->km_ring;
2145 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2146 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2147 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2148 	struct amdgpu_device *adev = psp->adev;
2149 	uint32_t ring_size_dw = ring->ring_size / 4;
2150 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2151 
2152 	/* Prepare the KM (GPCOM) write pointer */
2153 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
2154 
2155 	/* Update KM RB frame pointer to new frame */
2156 	/* write_frame ptr increments by size of rb_frame in bytes */
2157 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
2158 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
2159 		write_frame = ring_buffer_start;
2160 	else
2161 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2162 	/* Check for an out-of-bounds write_frame pointer */
2163 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2164 		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2165 			  ring_buffer_start, ring_buffer_end, write_frame);
2166 		DRM_ERROR("write_frame is pointing to address out of bounds\n");
2167 		return -EINVAL;
2168 	}
2169 
2170 	/* Initialize KM RB frame */
2171 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2172 
2173 	/* Update KM RB frame */
2174 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2175 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2176 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2177 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2178 	write_frame->fence_value = index;
2179 	amdgpu_asic_flush_hdp(adev, NULL);
2180 
2181 	/* Update the write Pointer in DWORDs */
2182 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2183 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
2184 	return 0;
2185 }
2186 
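/*
 * psp_init_asd_microcode - request and parse the standalone ASD binary
 *
 * Loads amdgpu/<chip>_asd.bin, validates it and caches the ASD version,
 * feature version, ucode size and start address in the psp context.
 */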
2187 int psp_init_asd_microcode(struct psp_context *psp,
2188 			   const char *chip_name)
2189 {
2190 	struct amdgpu_device *adev = psp->adev;
2191 	char fw_name[30];
2192 	const struct psp_firmware_header_v1_0 *asd_hdr;
2193 	int err = 0;
2194 
2195 	if (!chip_name) {
2196 		dev_err(adev->dev, "invalid chip name for asd microcode\n");
2197 		return -EINVAL;
2198 	}
2199 
2200 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
2201 	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
2202 	if (err)
2203 		goto out;
2204 
2205 	err = amdgpu_ucode_validate(adev->psp.asd_fw);
2206 	if (err)
2207 		goto out;
2208 
2209 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
2210 	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
2211 	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
2212 	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
2213 	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
2214 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
2215 	return 0;
2216 out:
2217 	dev_err(adev->dev, "failed to initialize asd microcode\n");
2218 	release_firmware(adev->psp.asd_fw);
2219 	adev->psp.asd_fw = NULL;
2220 	return err;
2221 }
2222 
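/*
 * psp_init_sos_microcode - request and parse the SOS binary
 *
 * Loads amdgpu/<chip>_sos.bin and records the SYS_DRV and SOS images.
 * Depending on the v1.x minor version the binary may additionally carry
 * a TOC (v1.1/v1.3), a KDB (v1.1/v1.2/v1.3) and an SPL image (v1.3),
 * whose sizes and start addresses are cached as well.
 */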
2223 int psp_init_sos_microcode(struct psp_context *psp,
2224 			   const char *chip_name)
2225 {
2226 	struct amdgpu_device *adev = psp->adev;
2227 	char fw_name[30];
2228 	const struct psp_firmware_header_v1_0 *sos_hdr;
2229 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
2230 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
2231 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
2232 	int err = 0;
2233 
2234 	if (!chip_name) {
2235 		dev_err(adev->dev, "invalid chip name for sos microcode\n");
2236 		return -EINVAL;
2237 	}
2238 
2239 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
2240 	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
2241 	if (err)
2242 		goto out;
2243 
2244 	err = amdgpu_ucode_validate(adev->psp.sos_fw);
2245 	if (err)
2246 		goto out;
2247 
2248 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
2249 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
2250 
2251 	switch (sos_hdr->header.header_version_major) {
2252 	case 1:
2253 		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
2254 		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
2255 		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
2256 		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
2257 		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
2258 				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
2259 		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2260 				le32_to_cpu(sos_hdr->sos_offset_bytes);
2261 		if (sos_hdr->header.header_version_minor == 1) {
2262 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
2263 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
2264 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2265 					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
2266 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
2267 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2268 					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
2269 		}
2270 		if (sos_hdr->header.header_version_minor == 2) {
2271 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
2272 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
2273 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2274 						    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
2275 		}
2276 		if (sos_hdr->header.header_version_minor == 3) {
2277 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
2278 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
2279 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2280 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
2281 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
2282 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2283 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
2284 			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
2285 			adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2286 				le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
2287 		}
2288 		break;
2289 	default:
2290 		dev_err(adev->dev,
2291 			"unsupported psp sos firmware\n");
2292 		err = -EINVAL;
2293 		goto out;
2294 	}
2295 
2296 	return 0;
2297 out:
2298 	dev_err(adev->dev,
2299 		"failed to init sos firmware\n");
2300 	release_firmware(adev->psp.sos_fw);
2301 	adev->psp.sos_fw = NULL;
2302 
2303 	return err;
2304 }
2305 
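/*
 * parse_ta_bin_descriptor - record one TA image from a packed TA binary
 *
 * Resolves the image start address from the descriptor offset and stores
 * the version, size and start address for the matching TA type (ASD,
 * XGMI, RAS, HDCP or DTM); unknown types only trigger a warning.
 */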
2306 int parse_ta_bin_descriptor(struct psp_context *psp,
2307 			    const struct ta_fw_bin_desc *desc,
2308 			    const struct ta_firmware_header_v2_0 *ta_hdr)
2309 {
2310 	uint8_t *ucode_start_addr  = NULL;
2311 
2312 	if (!psp || !desc || !ta_hdr)
2313 		return -EINVAL;
2314 
2315 	ucode_start_addr  = (uint8_t *)ta_hdr +
2316 			    le32_to_cpu(desc->offset_bytes) +
2317 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
2318 
2319 	switch (desc->fw_type) {
2320 	case TA_FW_TYPE_PSP_ASD:
2321 		psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
2322 		psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
2323 		psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
2324 		psp->asd_start_addr        = ucode_start_addr;
2325 		break;
2326 	case TA_FW_TYPE_PSP_XGMI:
2327 		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
2328 		psp->ta_xgmi_ucode_size    = le32_to_cpu(desc->size_bytes);
2329 		psp->ta_xgmi_start_addr    = ucode_start_addr;
2330 		break;
2331 	case TA_FW_TYPE_PSP_RAS:
2332 		psp->ta_ras_ucode_version  = le32_to_cpu(desc->fw_version);
2333 		psp->ta_ras_ucode_size     = le32_to_cpu(desc->size_bytes);
2334 		psp->ta_ras_start_addr     = ucode_start_addr;
2335 		break;
2336 	case TA_FW_TYPE_PSP_HDCP:
2337 		psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
2338 		psp->ta_hdcp_ucode_size    = le32_to_cpu(desc->size_bytes);
2339 		psp->ta_hdcp_start_addr    = ucode_start_addr;
2340 		break;
2341 	case TA_FW_TYPE_PSP_DTM:
2342 		psp->ta_dtm_ucode_version  = le32_to_cpu(desc->fw_version);
2343 		psp->ta_dtm_ucode_size     = le32_to_cpu(desc->size_bytes);
2344 		psp->ta_dtm_start_addr     = ucode_start_addr;
2345 		break;
2346 	default:
2347 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
2348 		break;
2349 	}
2350 
2351 	return 0;
2352 }
2353 
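/*
 * psp_init_ta_microcode - request and parse the packed TA binary
 *
 * Loads amdgpu/<chip>_ta.bin (v2 header only), checks that the packed TA
 * count stays below UCODE_MAX_TA_PACKAGING and hands each descriptor to
 * parse_ta_bin_descriptor().
 */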
2354 int psp_init_ta_microcode(struct psp_context *psp,
2355 			  const char *chip_name)
2356 {
2357 	struct amdgpu_device *adev = psp->adev;
2358 	char fw_name[30];
2359 	const struct ta_firmware_header_v2_0 *ta_hdr;
2360 	int err = 0;
2361 	int ta_index = 0;
2362 
2363 	if (!chip_name) {
2364 		dev_err(adev->dev, "invalid chip name for ta microcode\n");
2365 		return -EINVAL;
2366 	}
2367 
2368 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
2369 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
2370 	if (err)
2371 		goto out;
2372 
2373 	err = amdgpu_ucode_validate(adev->psp.ta_fw);
2374 	if (err)
2375 		goto out;
2376 
2377 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
2378 
2379 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
2380 		dev_err(adev->dev, "unsupported TA header version\n");
2381 		err = -EINVAL;
2382 		goto out;
2383 	}
2384 
2385 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
2386 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
2387 		err = -EINVAL;
2388 		goto out;
2389 	}
2390 
2391 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
2392 		err = parse_ta_bin_descriptor(psp,
2393 					      &ta_hdr->ta_fw_bin[ta_index],
2394 					      ta_hdr);
2395 		if (err)
2396 			goto out;
2397 	}
2398 
2399 	return 0;
2400 out:
2401 	dev_err(adev->dev, "failed to initialize ta microcode\n");
2402 	release_firmware(adev->psp.ta_fw);
2403 	adev->psp.ta_fw = NULL;
2404 	return err;
2405 }
2406 
2407 static int psp_set_clockgating_state(void *handle,
2408 				     enum amd_clockgating_state state)
2409 {
2410 	return 0;
2411 }
2412 
2413 static int psp_set_powergating_state(void *handle,
2414 				     enum amd_powergating_state state)
2415 {
2416 	return 0;
2417 }
2418 
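/*
 * The usbc_pd_fw sysfs file exposes the USBC PD firmware: reading it
 * returns the current firmware version, while writing a firmware file
 * name (relative to the amdgpu/ firmware directory) flashes that image
 * through the PSP.
 */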
2419 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
2420 					 struct device_attribute *attr,
2421 					 char *buf)
2422 {
2423 	struct drm_device *ddev = dev_get_drvdata(dev);
2424 	struct amdgpu_device *adev = ddev->dev_private;
2425 	uint32_t fw_ver;
2426 	int ret;
2427 
2428 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2429 		DRM_INFO("PSP block is not ready yet\n");
2430 		return -EBUSY;
2431 	}
2432 
2433 	mutex_lock(&adev->psp.mutex);
2434 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
2435 	mutex_unlock(&adev->psp.mutex);
2436 
2437 	if (ret) {
2438 		DRM_ERROR("Failed to read USBC PD FW, err = %d\n", ret);
2439 		return ret;
2440 	}
2441 
2442 	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
2443 }
2444 
2445 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
2446 					  struct device_attribute *attr,
2447 					  const char *buf,
2448 					  size_t count)
2449 {
2450 	struct drm_device *ddev = dev_get_drvdata(dev);
2451 	struct amdgpu_device *adev = ddev->dev_private;
2452 	void *cpu_addr;
2453 	dma_addr_t dma_addr;
2454 	int ret;
2455 	char fw_name[100];
2456 	const struct firmware *usbc_pd_fw;
2457 
2458 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
2459 		DRM_INFO("PSP block is not ready yet\n");
2460 		return -EBUSY;
2461 	}
2462 
2463 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
2464 	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
2465 	if (ret)
2466 		goto fail;
2467 
2468 	/* We need contiguous physical mem to place the FW for psp to access */
2469 	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
2470 	if (!cpu_addr) {
2471 		ret = -ENOMEM;
2472 		goto rel_fw;
2473 	}
2474 
2475 	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
2476 
2477 	/*
2478 	 * x86 specific workaround.
2479 	 * Without it the buffer is invisible to the PSP.
2480 	 *
2481 	 * TODO Remove once PSP starts snooping CPU cache
2482 	 */
2483 #ifdef CONFIG_X86
2484 	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
2485 #endif
2486 
2487 	mutex_lock(&adev->psp.mutex);
2488 	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
2489 	mutex_unlock(&adev->psp.mutex);
2490 
2491 rel_buf:
2492 	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
rel_fw:
2493 	release_firmware(usbc_pd_fw);
2494 
2495 fail:
2496 	if (ret) {
2497 		DRM_ERROR("Failed to load USBC PD FW, err = %d\n", ret);
2498 		return ret;
2499 	}
2500 
2501 	return count;
2502 }
2503 
2504 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
2505 		   psp_usbc_pd_fw_sysfs_read,
2506 		   psp_usbc_pd_fw_sysfs_write);
2507 
2510 const struct amd_ip_funcs psp_ip_funcs = {
2511 	.name = "psp",
2512 	.early_init = psp_early_init,
2513 	.late_init = NULL,
2514 	.sw_init = psp_sw_init,
2515 	.sw_fini = psp_sw_fini,
2516 	.hw_init = psp_hw_init,
2517 	.hw_fini = psp_hw_fini,
2518 	.suspend = psp_suspend,
2519 	.resume = psp_resume,
2520 	.is_idle = NULL,
2521 	.check_soft_reset = NULL,
2522 	.wait_for_idle = NULL,
2523 	.soft_reset = NULL,
2524 	.set_clockgating_state = psp_set_clockgating_state,
2525 	.set_powergating_state = psp_set_powergating_state,
2526 };
2527 
2528 static int psp_sysfs_init(struct amdgpu_device *adev)
2529 {
2530 	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
2531 
2532 	if (ret)
2533 		DRM_ERROR("Failed to create USBC PD FW control file!");
2534 
2535 	return ret;
2536 }
2537 
2538 static void psp_sysfs_fini(struct amdgpu_device *adev)
2539 {
2540 	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
2541 }
2542 
2543 const struct amdgpu_ip_block_version psp_v3_1_ip_block =
2544 {
2545 	.type = AMD_IP_BLOCK_TYPE_PSP,
2546 	.major = 3,
2547 	.minor = 1,
2548 	.rev = 0,
2549 	.funcs = &psp_ip_funcs,
2550 };
2551 
2552 const struct amdgpu_ip_block_version psp_v10_0_ip_block =
2553 {
2554 	.type = AMD_IP_BLOCK_TYPE_PSP,
2555 	.major = 10,
2556 	.minor = 0,
2557 	.rev = 0,
2558 	.funcs = &psp_ip_funcs,
2559 };
2560 
2561 const struct amdgpu_ip_block_version psp_v11_0_ip_block =
2562 {
2563 	.type = AMD_IP_BLOCK_TYPE_PSP,
2564 	.major = 11,
2565 	.minor = 0,
2566 	.rev = 0,
2567 	.funcs = &psp_ip_funcs,
2568 };
2569 
2570 const struct amdgpu_ip_block_version psp_v12_0_ip_block =
2571 {
2572 	.type = AMD_IP_BLOCK_TYPE_PSP,
2573 	.major = 12,
2574 	.minor = 0,
2575 	.rev = 0,
2576 	.funcs = &psp_ip_funcs,
2577 };
2578