1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/dma-mapping.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "soc15_common.h"
33 #include "psp_v3_1.h"
34 #include "psp_v10_0.h"
35 #include "psp_v11_0.h"
36 #include "psp_v12_0.h"
37 
38 #include "amdgpu_ras.h"
39 
40 static int psp_sysfs_init(struct amdgpu_device *adev);
41 static void psp_sysfs_fini(struct amdgpu_device *adev);
42 
43 static int psp_load_smu_fw(struct psp_context *psp);
44 
45 /*
46  * Because DF Cstate management is centralized in the PMFW, the firmware
47  * loading sequence is updated as below:
48  *   - Load KDB
49  *   - Load SYS_DRV
50  *   - Load tOS
51  *   - Load PMFW
52  *   - Setup TMR
53  *   - Load other non-psp fw
54  *   - Load ASD
55  *   - Load XGMI/RAS/HDCP/DTM TA if any
56  *
57  * This new sequence is required for
58  *   - Arcturus
59  *   - Navi12 and onwards
60  */
61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
62 {
63 	struct amdgpu_device *adev = psp->adev;
64 
65 	psp->pmfw_centralized_cstate_management = false;
66 
67 	if (amdgpu_sriov_vf(adev))
68 		return;
69 
70 	if (adev->flags & AMD_IS_APU)
71 		return;
72 
73 	if ((adev->asic_type == CHIP_ARCTURUS) ||
74 	    (adev->asic_type >= CHIP_NAVI12))
75 		psp->pmfw_centralized_cstate_management = true;
76 }
77 
78 static int psp_early_init(void *handle)
79 {
80 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
81 	struct psp_context *psp = &adev->psp;
82 
83 	switch (adev->asic_type) {
84 	case CHIP_VEGA10:
85 	case CHIP_VEGA12:
86 		psp_v3_1_set_psp_funcs(psp);
87 		psp->autoload_supported = false;
88 		break;
89 	case CHIP_RAVEN:
90 		psp_v10_0_set_psp_funcs(psp);
91 		psp->autoload_supported = false;
92 		break;
93 	case CHIP_VEGA20:
94 	case CHIP_ARCTURUS:
95 		psp_v11_0_set_psp_funcs(psp);
96 		psp->autoload_supported = false;
97 		break;
98 	case CHIP_NAVI10:
99 	case CHIP_NAVI14:
100 	case CHIP_NAVI12:
101 	case CHIP_SIENNA_CICHLID:
102 	case CHIP_NAVY_FLOUNDER:
103 		psp_v11_0_set_psp_funcs(psp);
104 		psp->autoload_supported = true;
105 		break;
106 	case CHIP_RENOIR:
107 		psp_v12_0_set_psp_funcs(psp);
108 		break;
109 	default:
110 		return -EINVAL;
111 	}
112 
113 	psp->adev = adev;
114 
115 	psp_check_pmfw_centralized_cstate_management(psp);
116 
117 	return 0;
118 }
119 
120 static void psp_memory_training_fini(struct psp_context *psp)
121 {
122 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
123 
124 	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
125 	kfree(ctx->sys_cache);
126 	ctx->sys_cache = NULL;
127 }
128 
129 static int psp_memory_training_init(struct psp_context *psp)
130 {
131 	int ret;
132 	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
133 
134 	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
135 		DRM_DEBUG("memory training is not supported!\n");
136 		return 0;
137 	}
138 
139 	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
140 	if (!ctx->sys_cache) {
141 		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
142 		ret = -ENOMEM;
143 		goto err_out;
144 	}
145 
146 	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
147 		  ctx->train_data_size,
148 		  ctx->p2c_train_data_offset,
149 		  ctx->c2p_train_data_offset);
150 	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
151 	return 0;
152 
153 err_out:
154 	psp_memory_training_fini(psp);
155 	return ret;
156 }
157 
158 static int psp_sw_init(void *handle)
159 {
160 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
161 	struct psp_context *psp = &adev->psp;
162 	int ret;
163 
164 	ret = psp_init_microcode(psp);
165 	if (ret) {
166 		DRM_ERROR("Failed to load psp firmware!\n");
167 		return ret;
168 	}
169 
170 	ret = psp_memory_training_init(psp);
171 	if (ret) {
172 		DRM_ERROR("Failed to initialize memory training!\n");
173 		return ret;
174 	}
175 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
176 	if (ret) {
177 		DRM_ERROR("Failed to process memory training!\n");
178 		return ret;
179 	}
180 
181 	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
182 		ret = psp_sysfs_init(adev);
183 		if (ret) {
184 			return ret;
185 		}
186 	}
187 
188 	return 0;
189 }
190 
191 static int psp_sw_fini(void *handle)
192 {
193 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
194 
195 	psp_memory_training_fini(&adev->psp);
196 	if (adev->psp.sos_fw) {
197 		release_firmware(adev->psp.sos_fw);
198 		adev->psp.sos_fw = NULL;
199 	}
200 	if (adev->psp.asd_fw) {
201 		release_firmware(adev->psp.asd_fw);
202 		adev->psp.asd_fw = NULL;
203 	}
204 	if (adev->psp.ta_fw) {
205 		release_firmware(adev->psp.ta_fw);
206 		adev->psp.ta_fw = NULL;
207 	}
208 
209 	if (adev->asic_type == CHIP_NAVI10)
210 		psp_sysfs_fini(adev);
211 
212 	return 0;
213 }
214 
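/*
 * Poll a PSP register until (val & mask) matches @reg_val, or, when
 * @check_changed is set, until the register changes away from @reg_val.
 * Gives up after adev->usec_timeout iterations and returns -ETIME.
 */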
215 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
216 		 uint32_t reg_val, uint32_t mask, bool check_changed)
217 {
218 	uint32_t val;
219 	int i;
220 	struct amdgpu_device *adev = psp->adev;
221 
222 	for (i = 0; i < adev->usec_timeout; i++) {
223 		val = RREG32(reg_index);
224 		if (check_changed) {
225 			if (val != reg_val)
226 				return 0;
227 		} else {
228 			if ((val & mask) == reg_val)
229 				return 0;
230 		}
231 		udelay(1);
232 	}
233 
234 	return -ETIME;
235 }
236 
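/*
 * Copy a GFX command into the PSP command buffer, submit it to the KM ring
 * with a new fence value, then poll the fence buffer (roughly two seconds)
 * for completion and check the response status. Bails out early if a RAS
 * err_event_athub interrupt has been raised.
 */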
237 static int
238 psp_cmd_submit_buf(struct psp_context *psp,
239 		   struct amdgpu_firmware_info *ucode,
240 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
241 {
242 	int ret;
243 	int index;
244 	int timeout = 2000;
245 	bool ras_intr = false;
246 	bool skip_unsupport = false;
247 
248 	mutex_lock(&psp->mutex);
249 
250 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
251 
252 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
253 
254 	index = atomic_inc_return(&psp->fence_value);
255 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
256 	if (ret) {
257 		atomic_dec(&psp->fence_value);
258 		mutex_unlock(&psp->mutex);
259 		return ret;
260 	}
261 
262 	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
263 	while (*((unsigned int *)psp->fence_buf) != index) {
264 		if (--timeout == 0)
265 			break;
266 		/*
267 		 * Don't wait for the timeout when err_event_athub occurs,
268 		 * because the gpu reset thread has been triggered and the locked
269 		 * resources should be released for the psp resume sequence.
270 		 */
271 		ras_intr = amdgpu_ras_intr_triggered();
272 		if (ras_intr)
273 			break;
274 		msleep(1);
275 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
276 	}
277 
278 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
279 	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
280 		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
281 
282 	/* In some cases, the psp response status is not 0 even though there is
283 	 * no problem while the command is submitted. Some versions of the PSP FW
284 	 * don't write 0 to that field.
285 	 * So only print a warning instead of an error during psp
286 	 * initialization, to avoid breaking hw_init, and don't
287 	 * return -EINVAL.
288 	 */
289 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
290 		if (ucode)
291 			DRM_WARN("failed to load ucode id (%d) ",
292 				  ucode->ucode_id);
293 		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
294 			 psp->cmd_buf_mem->cmd_id,
295 			 psp->cmd_buf_mem->resp.status);
296 		if (!timeout) {
297 			mutex_unlock(&psp->mutex);
298 			return -EINVAL;
299 		}
300 	}
301 
302 	/* get xGMI session id from response buffer */
303 	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
304 
305 	if (ucode) {
306 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
307 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
308 	}
309 	mutex_unlock(&psp->mutex);
310 
311 	return ret;
312 }
313 
314 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
315 				 struct psp_gfx_cmd_resp *cmd,
316 				 uint64_t tmr_mc, uint32_t size)
317 {
318 	if (amdgpu_sriov_vf(psp->adev))
319 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
320 	else
321 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
322 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
323 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
324 	cmd->cmd.cmd_setup_tmr.buf_size = size;
325 }
326 
327 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
328 				      uint64_t pri_buf_mc, uint32_t size)
329 {
330 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
331 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
332 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
333 	cmd->cmd.cmd_load_toc.toc_size = size;
334 }
335 
336 /* Issue the LOAD TOC cmd to the PSP to parse the toc and calculate the tmr size needed */
337 static int psp_load_toc(struct psp_context *psp,
338 			uint32_t *tmr_size)
339 {
340 	int ret;
341 	struct psp_gfx_cmd_resp *cmd;
342 
343 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
344 	if (!cmd)
345 		return -ENOMEM;
346 	/* Copy toc to psp firmware private buffer */
347 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
348 	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
349 
350 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
351 
352 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
353 				 psp->fence_buf_mc_addr);
354 	if (!ret)
355 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
356 	kfree(cmd);
357 	return ret;
358 }
359 
360 /* Set up Trusted Memory Region */
361 static int psp_tmr_init(struct psp_context *psp)
362 {
363 	int ret;
364 	uint32_t tmr_size;
365 	void *tmr_buf;
366 	void **pptr;
367 
368 	/*
369 	 * According to HW engineers, the TMR address should be "naturally
370 	 * aligned", i.e. an integer multiple of the TMR size.
371 	 *
372 	 * Note: this memory needs to be reserved until the driver
373 	 * is unloaded.
374 	 */
375 	tmr_size = PSP_TMR_SIZE;
376 
377 	/* For ASICs that support RLC autoload, the psp will parse the toc
378 	 * and calculate the total TMR size needed */
379 	if (!amdgpu_sriov_vf(psp->adev) &&
380 	    psp->toc_start_addr &&
381 	    psp->toc_bin_size &&
382 	    psp->fw_pri_buf) {
383 		ret = psp_load_toc(psp, &tmr_size);
384 		if (ret) {
385 			DRM_ERROR("Failed to load toc\n");
386 			return ret;
387 		}
388 	}
389 
390 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
391 	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
392 				      AMDGPU_GEM_DOMAIN_VRAM,
393 				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
394 
395 	return ret;
396 }
397 
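/* Ask the PSP to clear leftover VF firmware; only needed for Navi12 under SRIOV. */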
398 static int psp_clear_vf_fw(struct psp_context *psp)
399 {
400 	int ret;
401 	struct psp_gfx_cmd_resp *cmd;
402 
403 	if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12)
404 		return 0;
405 
406 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
407 	if (!cmd)
408 		return -ENOMEM;
409 
410 	cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW;
411 
412 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
413 	kfree(cmd);
414 
415 	return ret;
416 }
417 
418 static bool psp_skip_tmr(struct psp_context *psp)
419 {
420 	switch (psp->adev->asic_type) {
421 	case CHIP_NAVI12:
422 	case CHIP_SIENNA_CICHLID:
423 		return true;
424 	default:
425 		return false;
426 	}
427 }
428 
429 static int psp_tmr_load(struct psp_context *psp)
430 {
431 	int ret;
432 	struct psp_gfx_cmd_resp *cmd;
433 
434 	/* For Navi12 and Sienna Cichlid SRIOV, do not set up the TMR;
435 	 * it is already set up by the host driver.
436 	 */
437 	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
438 		return 0;
439 
440 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
441 	if (!cmd)
442 		return -ENOMEM;
443 
444 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
445 			     amdgpu_bo_size(psp->tmr_bo));
446 	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
447 		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
448 
449 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
450 				 psp->fence_buf_mc_addr);
451 
452 	kfree(cmd);
453 
454 	return ret;
455 }
456 
457 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
458 					struct psp_gfx_cmd_resp *cmd)
459 {
460 	if (amdgpu_sriov_vf(psp->adev))
461 		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
462 	else
463 		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
464 }
465 
466 static int psp_tmr_unload(struct psp_context *psp)
467 {
468 	int ret;
469 	struct psp_gfx_cmd_resp *cmd;
470 
471 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
472 	if (!cmd)
473 		return -ENOMEM;
474 
475 	psp_prep_tmr_unload_cmd_buf(psp, cmd);
476 	DRM_INFO("free PSP TMR buffer\n");
477 
478 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
479 				 psp->fence_buf_mc_addr);
480 
481 	kfree(cmd);
482 
483 	return ret;
484 }
485 
486 static int psp_tmr_terminate(struct psp_context *psp)
487 {
488 	int ret;
489 	void *tmr_buf;
490 	void **pptr;
491 
492 	ret = psp_tmr_unload(psp);
493 	if (ret)
494 		return ret;
495 
496 	/* free TMR memory buffer */
497 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
498 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
499 
500 	return 0;
501 }
502 
503 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
504 				uint64_t asd_mc, uint32_t size)
505 {
506 	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
507 	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
508 	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
509 	cmd->cmd.cmd_load_ta.app_len = size;
510 
511 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
512 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
513 	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
514 }
515 
516 static int psp_asd_load(struct psp_context *psp)
517 {
518 	int ret;
519 	struct psp_gfx_cmd_resp *cmd;
520 
521 	/* If the PSP version doesn't match the ASD version, asd loading will fail.
522 	 * Add a workaround to bypass it for sriov for now.
523 	 * TODO: add a version check to make it common
524 	 */
525 	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
526 		return 0;
527 
528 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
529 	if (!cmd)
530 		return -ENOMEM;
531 
532 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
533 	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
534 
535 	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
536 				  psp->asd_ucode_size);
537 
538 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
539 				 psp->fence_buf_mc_addr);
540 	if (!ret) {
541 		psp->asd_context.asd_initialized = true;
542 		psp->asd_context.session_id = cmd->resp.session_id;
543 	}
544 
545 	kfree(cmd);
546 
547 	return ret;
548 }
549 
550 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
551 				       uint32_t session_id)
552 {
553 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
554 	cmd->cmd.cmd_unload_ta.session_id = session_id;
555 }
556 
557 static int psp_asd_unload(struct psp_context *psp)
558 {
559 	int ret;
560 	struct psp_gfx_cmd_resp *cmd;
561 
562 	if (amdgpu_sriov_vf(psp->adev))
563 		return 0;
564 
565 	if (!psp->asd_context.asd_initialized)
566 		return 0;
567 
568 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
569 	if (!cmd)
570 		return -ENOMEM;
571 
572 	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
573 
574 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
575 				 psp->fence_buf_mc_addr);
576 	if (!ret)
577 		psp->asd_context.asd_initialized = false;
578 
579 	kfree(cmd);
580 
581 	return ret;
582 }
583 
584 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
585 		uint32_t id, uint32_t value)
586 {
587 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
588 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
589 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
590 }
591 
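/* Request that the PSP program the register identified by @reg to @value (GFX_CMD_ID_PROG_REG). */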
592 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
593 		uint32_t value)
594 {
595 	struct psp_gfx_cmd_resp *cmd = NULL;
596 	int ret = 0;
597 
598 	if (reg >= PSP_REG_LAST)
599 		return -EINVAL;
600 
601 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
602 	if (!cmd)
603 		return -ENOMEM;
604 
605 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
606 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
607 
608 	kfree(cmd);
609 	return ret;
610 }
611 
612 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
613 				     uint64_t ta_bin_mc,
614 				     uint32_t ta_bin_size,
615 				     uint64_t ta_shared_mc,
616 				     uint32_t ta_shared_size)
617 {
618 	cmd->cmd_id 				= GFX_CMD_ID_LOAD_TA;
619 	cmd->cmd.cmd_load_ta.app_phy_addr_lo 	= lower_32_bits(ta_bin_mc);
620 	cmd->cmd.cmd_load_ta.app_phy_addr_hi 	= upper_32_bits(ta_bin_mc);
621 	cmd->cmd.cmd_load_ta.app_len 		= ta_bin_size;
622 
623 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
624 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
625 	cmd->cmd.cmd_load_ta.cmd_buf_len 	 = ta_shared_size;
626 }
627 
628 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
629 {
630 	int ret;
631 
632 	/*
633 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
634 	 * physical) for the xgmi ta <-> driver interface
635 	 */
636 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
637 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
638 				      &psp->xgmi_context.xgmi_shared_bo,
639 				      &psp->xgmi_context.xgmi_shared_mc_addr,
640 				      &psp->xgmi_context.xgmi_shared_buf);
641 
642 	return ret;
643 }
644 
645 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
646 				       uint32_t ta_cmd_id,
647 				       uint32_t session_id)
648 {
649 	cmd->cmd_id 				= GFX_CMD_ID_INVOKE_CMD;
650 	cmd->cmd.cmd_invoke_cmd.session_id 	= session_id;
651 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id 	= ta_cmd_id;
652 }
653 
654 static int psp_ta_invoke(struct psp_context *psp,
655 		  uint32_t ta_cmd_id,
656 		  uint32_t session_id)
657 {
658 	int ret;
659 	struct psp_gfx_cmd_resp *cmd;
660 
661 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
662 	if (!cmd)
663 		return -ENOMEM;
664 
665 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
666 
667 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
668 				 psp->fence_buf_mc_addr);
669 
670 	kfree(cmd);
671 
672 	return ret;
673 }
674 
675 static int psp_xgmi_load(struct psp_context *psp)
676 {
677 	int ret;
678 	struct psp_gfx_cmd_resp *cmd;
679 
680 	/*
681 	 * TODO: bypass the loading in sriov for now
682 	 */
683 
684 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
685 	if (!cmd)
686 		return -ENOMEM;
687 
688 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
689 	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
690 
691 	psp_prep_ta_load_cmd_buf(cmd,
692 				 psp->fw_pri_mc_addr,
693 				 psp->ta_xgmi_ucode_size,
694 				 psp->xgmi_context.xgmi_shared_mc_addr,
695 				 PSP_XGMI_SHARED_MEM_SIZE);
696 
697 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
698 				 psp->fence_buf_mc_addr);
699 
700 	if (!ret) {
701 		psp->xgmi_context.initialized = 1;
702 		psp->xgmi_context.session_id = cmd->resp.session_id;
703 	}
704 
705 	kfree(cmd);
706 
707 	return ret;
708 }
709 
710 static int psp_xgmi_unload(struct psp_context *psp)
711 {
712 	int ret;
713 	struct psp_gfx_cmd_resp *cmd;
714 	struct amdgpu_device *adev = psp->adev;
715 
716 	/* XGMI TA unload currently is not supported on Arcturus */
717 	if (adev->asic_type == CHIP_ARCTURUS)
718 		return 0;
719 
720 	/*
721 	 * TODO: bypass the unloading in sriov for now
722 	 */
723 
724 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
725 	if (!cmd)
726 		return -ENOMEM;
727 
728 	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
729 
730 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
731 				 psp->fence_buf_mc_addr);
732 
733 	kfree(cmd);
734 
735 	return ret;
736 }
737 
738 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
739 {
740 	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
741 }
742 
743 int psp_xgmi_terminate(struct psp_context *psp)
744 {
745 	int ret;
746 
747 	if (!psp->xgmi_context.initialized)
748 		return 0;
749 
750 	ret = psp_xgmi_unload(psp);
751 	if (ret)
752 		return ret;
753 
754 	psp->xgmi_context.initialized = 0;
755 
756 	/* free xgmi shared memory */
757 	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
758 			&psp->xgmi_context.xgmi_shared_mc_addr,
759 			&psp->xgmi_context.xgmi_shared_buf);
760 
761 	return 0;
762 }
763 
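/*
 * Set up the XGMI TA: allocate the shared buffer if needed, load the TA
 * binary, and send the TA_COMMAND_XGMI__INITIALIZE command.
 */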
764 int psp_xgmi_initialize(struct psp_context *psp)
765 {
766 	struct ta_xgmi_shared_memory *xgmi_cmd;
767 	int ret;
768 
769 	if (!psp->adev->psp.ta_fw ||
770 	    !psp->adev->psp.ta_xgmi_ucode_size ||
771 	    !psp->adev->psp.ta_xgmi_start_addr)
772 		return -ENOENT;
773 
774 	if (!psp->xgmi_context.initialized) {
775 		ret = psp_xgmi_init_shared_buf(psp);
776 		if (ret)
777 			return ret;
778 	}
779 
780 	/* Load XGMI TA */
781 	ret = psp_xgmi_load(psp);
782 	if (ret)
783 		return ret;
784 
785 	/* Initialize XGMI session */
786 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
787 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
788 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
789 
790 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
791 
792 	return ret;
793 }
794 
795 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
796 {
797 	struct ta_xgmi_shared_memory *xgmi_cmd;
798 	int ret;
799 
800 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
801 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
802 
803 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
804 
805 	/* Invoke xgmi ta to get hive id */
806 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
807 	if (ret)
808 		return ret;
809 
810 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
811 
812 	return 0;
813 }
814 
815 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
816 {
817 	struct ta_xgmi_shared_memory *xgmi_cmd;
818 	int ret;
819 
820 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
821 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
822 
823 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
824 
825 	/* Invoke xgmi ta to get the node id */
826 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
827 	if (ret)
828 		return ret;
829 
830 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
831 
832 	return 0;
833 }
834 
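/*
 * Query XGMI topology: copy the requested node list into the TA shared
 * buffer, invoke TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO, and copy the
 * returned node information back into @topology.
 */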
835 int psp_xgmi_get_topology_info(struct psp_context *psp,
836 			       int number_devices,
837 			       struct psp_xgmi_topology_info *topology)
838 {
839 	struct ta_xgmi_shared_memory *xgmi_cmd;
840 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
841 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
842 	int i;
843 	int ret;
844 
845 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
846 		return -EINVAL;
847 
848 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
849 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
850 
851 	/* Fill in the shared memory with topology information as input */
852 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
853 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
854 	topology_info_input->num_nodes = number_devices;
855 
856 	for (i = 0; i < topology_info_input->num_nodes; i++) {
857 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
858 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
859 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
860 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
861 	}
862 
863 	/* Invoke xgmi ta to get the topology information */
864 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
865 	if (ret)
866 		return ret;
867 
868 	/* Read the output topology information from the shared memory */
869 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
870 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
871 	for (i = 0; i < topology->num_nodes; i++) {
872 		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
873 		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
874 		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
875 		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
876 	}
877 
878 	return 0;
879 }
880 
881 int psp_xgmi_set_topology_info(struct psp_context *psp,
882 			       int number_devices,
883 			       struct psp_xgmi_topology_info *topology)
884 {
885 	struct ta_xgmi_shared_memory *xgmi_cmd;
886 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
887 	int i;
888 
889 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
890 		return -EINVAL;
891 
892 	xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
893 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
894 
895 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
896 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
897 	topology_info_input->num_nodes = number_devices;
898 
899 	for (i = 0; i < topology_info_input->num_nodes; i++) {
900 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
901 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
902 		topology_info_input->nodes[i].is_sharing_enabled = 1;
903 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
904 	}
905 
906 	/* Invoke xgmi ta to set topology information */
907 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
908 }
909 
910 // ras begin
911 static int psp_ras_init_shared_buf(struct psp_context *psp)
912 {
913 	int ret;
914 
915 	/*
916 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
917 	 * physical) for the ras ta <-> driver interface
918 	 */
919 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
920 			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
921 			&psp->ras.ras_shared_bo,
922 			&psp->ras.ras_shared_mc_addr,
923 			&psp->ras.ras_shared_buf);
924 
925 	return ret;
926 }
927 
928 static int psp_ras_load(struct psp_context *psp)
929 {
930 	int ret;
931 	struct psp_gfx_cmd_resp *cmd;
932 
933 	/*
934 	 * TODO: bypass the loading in sriov for now
935 	 */
936 	if (amdgpu_sriov_vf(psp->adev))
937 		return 0;
938 
939 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
940 	if (!cmd)
941 		return -ENOMEM;
942 
943 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
944 	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
945 
946 	psp_prep_ta_load_cmd_buf(cmd,
947 				 psp->fw_pri_mc_addr,
948 				 psp->ta_ras_ucode_size,
949 				 psp->ras.ras_shared_mc_addr,
950 				 PSP_RAS_SHARED_MEM_SIZE);
951 
952 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
953 			psp->fence_buf_mc_addr);
954 
955 	if (!ret) {
956 		psp->ras.ras_initialized = true;
957 		psp->ras.session_id = cmd->resp.session_id;
958 	}
959 
960 	kfree(cmd);
961 
962 	return ret;
963 }
964 
965 static int psp_ras_unload(struct psp_context *psp)
966 {
967 	int ret;
968 	struct psp_gfx_cmd_resp *cmd;
969 
970 	/*
971 	 * TODO: bypass the unloading in sriov for now
972 	 */
973 	if (amdgpu_sriov_vf(psp->adev))
974 		return 0;
975 
976 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
977 	if (!cmd)
978 		return -ENOMEM;
979 
980 	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
981 
982 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
983 			psp->fence_buf_mc_addr);
984 
985 	kfree(cmd);
986 
987 	return ret;
988 }
989 
990 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
991 {
992 	struct ta_ras_shared_memory *ras_cmd;
993 	int ret;
994 
995 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
996 
997 	/*
998 	 * TODO: bypass the loading in sriov for now
999 	 */
1000 	if (amdgpu_sriov_vf(psp->adev))
1001 		return 0;
1002 
1003 	ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
1004 
1005 	if (amdgpu_ras_intr_triggered())
1006 		return ret;
1007 
1008 	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
1009 	{
1010 		DRM_WARN("RAS: Unsupported Interface\n");
1011 		return -EINVAL;
1012 	}
1013 
1014 	if (!ret) {
1015 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1016 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
1017 
1018 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1019 		}
1020 		else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1021 			dev_warn(psp->adev->dev,
1022 				 "RAS internal register access blocked\n");
1023 	}
1024 
1025 	return ret;
1026 }
1027 
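/*
 * Enable or disable the RAS features described by @info through the RAS TA.
 * Returns the TA status on success, or -EINVAL if the invocation fails.
 */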
1028 int psp_ras_enable_features(struct psp_context *psp,
1029 		union ta_ras_cmd_input *info, bool enable)
1030 {
1031 	struct ta_ras_shared_memory *ras_cmd;
1032 	int ret;
1033 
1034 	if (!psp->ras.ras_initialized)
1035 		return -EINVAL;
1036 
1037 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1038 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1039 
1040 	if (enable)
1041 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1042 	else
1043 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1044 
1045 	ras_cmd->ras_in_message = *info;
1046 
1047 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1048 	if (ret)
1049 		return -EINVAL;
1050 
1051 	return ras_cmd->ras_status;
1052 }
1053 
1054 static int psp_ras_terminate(struct psp_context *psp)
1055 {
1056 	int ret;
1057 
1058 	/*
1059 	 * TODO: bypass the terminate in sriov for now
1060 	 */
1061 	if (amdgpu_sriov_vf(psp->adev))
1062 		return 0;
1063 
1064 	if (!psp->ras.ras_initialized)
1065 		return 0;
1066 
1067 	ret = psp_ras_unload(psp);
1068 	if (ret)
1069 		return ret;
1070 
1071 	psp->ras.ras_initialized = false;
1072 
1073 	/* free ras shared memory */
1074 	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
1075 			&psp->ras.ras_shared_mc_addr,
1076 			&psp->ras.ras_shared_buf);
1077 
1078 	return 0;
1079 }
1080 
1081 static int psp_ras_initialize(struct psp_context *psp)
1082 {
1083 	int ret;
1084 
1085 	/*
1086 	 * TODO: bypass the initialize in sriov for now
1087 	 */
1088 	if (amdgpu_sriov_vf(psp->adev))
1089 		return 0;
1090 
1091 	if (!psp->adev->psp.ta_ras_ucode_size ||
1092 	    !psp->adev->psp.ta_ras_start_addr) {
1093 		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
1094 		return 0;
1095 	}
1096 
1097 	if (!psp->ras.ras_initialized) {
1098 		ret = psp_ras_init_shared_buf(psp);
1099 		if (ret)
1100 			return ret;
1101 	}
1102 
1103 	ret = psp_ras_load(psp);
1104 	if (ret)
1105 		return ret;
1106 
1107 	return 0;
1108 }
1109 
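/*
 * Ask the RAS TA to inject the error described by @info. Returns the TA
 * status, except when an err_event_athub interrupt fired, in which case the
 * injection is considered successful and 0 is returned.
 */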
1110 int psp_ras_trigger_error(struct psp_context *psp,
1111 			  struct ta_ras_trigger_error_input *info)
1112 {
1113 	struct ta_ras_shared_memory *ras_cmd;
1114 	int ret;
1115 
1116 	if (!psp->ras.ras_initialized)
1117 		return -EINVAL;
1118 
1119 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
1120 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1121 
1122 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1123 	ras_cmd->ras_in_message.trigger_error = *info;
1124 
1125 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1126 	if (ret)
1127 		return -EINVAL;
1128 
1129 	/* If err_event_athub occurs, the error injection was successful;
1130 	 * however, the return status from the TA is no longer reliable. */
1131 	if (amdgpu_ras_intr_triggered())
1132 		return 0;
1133 
1134 	return ras_cmd->ras_status;
1135 }
1136 // ras end
1137 
1138 // HDCP start
1139 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
1140 {
1141 	int ret;
1142 
1143 	/*
1144 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1145 	 * physical) for the hdcp ta <-> driver interface
1146 	 */
1147 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
1148 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1149 				      &psp->hdcp_context.hdcp_shared_bo,
1150 				      &psp->hdcp_context.hdcp_shared_mc_addr,
1151 				      &psp->hdcp_context.hdcp_shared_buf);
1152 
1153 	return ret;
1154 }
1155 
1156 static int psp_hdcp_load(struct psp_context *psp)
1157 {
1158 	int ret;
1159 	struct psp_gfx_cmd_resp *cmd;
1160 
1161 	/*
1162 	 * TODO: bypass the loading in sriov for now
1163 	 */
1164 	if (amdgpu_sriov_vf(psp->adev))
1165 		return 0;
1166 
1167 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1168 	if (!cmd)
1169 		return -ENOMEM;
1170 
1171 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1172 	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
1173 	       psp->ta_hdcp_ucode_size);
1174 
1175 	psp_prep_ta_load_cmd_buf(cmd,
1176 				 psp->fw_pri_mc_addr,
1177 				 psp->ta_hdcp_ucode_size,
1178 				 psp->hdcp_context.hdcp_shared_mc_addr,
1179 				 PSP_HDCP_SHARED_MEM_SIZE);
1180 
1181 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1182 
1183 	if (!ret) {
1184 		psp->hdcp_context.hdcp_initialized = true;
1185 		psp->hdcp_context.session_id = cmd->resp.session_id;
1186 		mutex_init(&psp->hdcp_context.mutex);
1187 	}
1188 
1189 	kfree(cmd);
1190 
1191 	return ret;
1192 }
1193 static int psp_hdcp_initialize(struct psp_context *psp)
1194 {
1195 	int ret;
1196 
1197 	/*
1198 	 * TODO: bypass the initialize in sriov for now
1199 	 */
1200 	if (amdgpu_sriov_vf(psp->adev))
1201 		return 0;
1202 
1203 	if (!psp->adev->psp.ta_hdcp_ucode_size ||
1204 	    !psp->adev->psp.ta_hdcp_start_addr) {
1205 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1206 		return 0;
1207 	}
1208 
1209 	if (!psp->hdcp_context.hdcp_initialized) {
1210 		ret = psp_hdcp_init_shared_buf(psp);
1211 		if (ret)
1212 			return ret;
1213 	}
1214 
1215 	ret = psp_hdcp_load(psp);
1216 	if (ret)
1217 		return ret;
1218 
1219 	return 0;
1220 }
1221 
1222 static int psp_hdcp_unload(struct psp_context *psp)
1223 {
1224 	int ret;
1225 	struct psp_gfx_cmd_resp *cmd;
1226 
1227 	/*
1228 	 * TODO: bypass the unloading in sriov for now
1229 	 */
1230 	if (amdgpu_sriov_vf(psp->adev))
1231 		return 0;
1232 
1233 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1234 	if (!cmd)
1235 		return -ENOMEM;
1236 
1237 	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
1238 
1239 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1240 
1241 	kfree(cmd);
1242 
1243 	return ret;
1244 }
1245 
1246 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1247 {
1248 	/*
1249 	 * TODO: bypass the loading in sriov for now
1250 	 */
1251 	if (amdgpu_sriov_vf(psp->adev))
1252 		return 0;
1253 
1254 	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
1255 }
1256 
1257 static int psp_hdcp_terminate(struct psp_context *psp)
1258 {
1259 	int ret;
1260 
1261 	/*
1262 	 * TODO: bypass the terminate in sriov for now
1263 	 */
1264 	if (amdgpu_sriov_vf(psp->adev))
1265 		return 0;
1266 
1267 	if (!psp->hdcp_context.hdcp_initialized)
1268 		return 0;
1269 
1270 	ret = psp_hdcp_unload(psp);
1271 	if (ret)
1272 		return ret;
1273 
1274 	psp->hdcp_context.hdcp_initialized = false;
1275 
1276 	/* free hdcp shared memory */
1277 	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
1278 			      &psp->hdcp_context.hdcp_shared_mc_addr,
1279 			      &psp->hdcp_context.hdcp_shared_buf);
1280 
1281 	return 0;
1282 }
1283 // HDCP end
1284 
1285 // DTM start
1286 static int psp_dtm_init_shared_buf(struct psp_context *psp)
1287 {
1288 	int ret;
1289 
1290 	/*
1291 	 * Allocate 16k of memory, aligned to 4k, from the frame buffer (local
1292 	 * physical) for the dtm ta <-> driver interface
1293 	 */
1294 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
1295 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1296 				      &psp->dtm_context.dtm_shared_bo,
1297 				      &psp->dtm_context.dtm_shared_mc_addr,
1298 				      &psp->dtm_context.dtm_shared_buf);
1299 
1300 	return ret;
1301 }
1302 
1303 static int psp_dtm_load(struct psp_context *psp)
1304 {
1305 	int ret;
1306 	struct psp_gfx_cmd_resp *cmd;
1307 
1308 	/*
1309 	 * TODO: bypass the loading in sriov for now
1310 	 */
1311 	if (amdgpu_sriov_vf(psp->adev))
1312 		return 0;
1313 
1314 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1315 	if (!cmd)
1316 		return -ENOMEM;
1317 
1318 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1319 	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
1320 
1321 	psp_prep_ta_load_cmd_buf(cmd,
1322 				 psp->fw_pri_mc_addr,
1323 				 psp->ta_dtm_ucode_size,
1324 				 psp->dtm_context.dtm_shared_mc_addr,
1325 				 PSP_DTM_SHARED_MEM_SIZE);
1326 
1327 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1328 
1329 	if (!ret) {
1330 		psp->dtm_context.dtm_initialized = true;
1331 		psp->dtm_context.session_id = cmd->resp.session_id;
1332 		mutex_init(&psp->dtm_context.mutex);
1333 	}
1334 
1335 	kfree(cmd);
1336 
1337 	return ret;
1338 }
1339 
1340 static int psp_dtm_initialize(struct psp_context *psp)
1341 {
1342 	int ret;
1343 
1344 	/*
1345 	 * TODO: bypass the initialize in sriov for now
1346 	 */
1347 	if (amdgpu_sriov_vf(psp->adev))
1348 		return 0;
1349 
1350 	if (!psp->adev->psp.ta_dtm_ucode_size ||
1351 	    !psp->adev->psp.ta_dtm_start_addr) {
1352 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1353 		return 0;
1354 	}
1355 
1356 	if (!psp->dtm_context.dtm_initialized) {
1357 		ret = psp_dtm_init_shared_buf(psp);
1358 		if (ret)
1359 			return ret;
1360 	}
1361 
1362 	ret = psp_dtm_load(psp);
1363 	if (ret)
1364 		return ret;
1365 
1366 	return 0;
1367 }
1368 
1369 static int psp_dtm_unload(struct psp_context *psp)
1370 {
1371 	int ret;
1372 	struct psp_gfx_cmd_resp *cmd;
1373 
1374 	/*
1375 	 * TODO: bypass the unloading in sriov for now
1376 	 */
1377 	if (amdgpu_sriov_vf(psp->adev))
1378 		return 0;
1379 
1380 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1381 	if (!cmd)
1382 		return -ENOMEM;
1383 
1384 	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
1385 
1386 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1387 
1388 	kfree(cmd);
1389 
1390 	return ret;
1391 }
1392 
1393 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1394 {
1395 	/*
1396 	 * TODO: bypass the loading in sriov for now
1397 	 */
1398 	if (amdgpu_sriov_vf(psp->adev))
1399 		return 0;
1400 
1401 	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
1402 }
1403 
1404 static int psp_dtm_terminate(struct psp_context *psp)
1405 {
1406 	int ret;
1407 
1408 	/*
1409 	 * TODO: bypass the terminate in sriov for now
1410 	 */
1411 	if (amdgpu_sriov_vf(psp->adev))
1412 		return 0;
1413 
1414 	if (!psp->dtm_context.dtm_initialized)
1415 		return 0;
1416 
1417 	ret = psp_dtm_unload(psp);
1418 	if (ret)
1419 		return ret;
1420 
1421 	psp->dtm_context.dtm_initialized = false;
1422 
1423 	/* free dtm shared memory */
1424 	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
1425 			      &psp->dtm_context.dtm_shared_mc_addr,
1426 			      &psp->dtm_context.dtm_shared_buf);
1427 
1428 	return 0;
1429 }
1430 // DTM end
1431 
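/*
 * Bring up the PSP: on bare metal, run the bootloader stages (KDB, SPL,
 * sysdrv, sos), then create the KM ring, clear leftover VF firmware, and
 * set up and load the TMR. When PMFW centralized cstate management is in
 * use, the SMU firmware is loaded before the TMR load.
 */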
1432 static int psp_hw_start(struct psp_context *psp)
1433 {
1434 	struct amdgpu_device *adev = psp->adev;
1435 	int ret;
1436 
1437 	if (!amdgpu_sriov_vf(adev)) {
1438 		if (psp->kdb_bin_size &&
1439 		    (psp->funcs->bootloader_load_kdb != NULL)) {
1440 			ret = psp_bootloader_load_kdb(psp);
1441 			if (ret) {
1442 				DRM_ERROR("PSP load kdb failed!\n");
1443 				return ret;
1444 			}
1445 		}
1446 
1447 		if (psp->spl_bin_size) {
1448 			ret = psp_bootloader_load_spl(psp);
1449 			if (ret) {
1450 				DRM_ERROR("PSP load spl failed!\n");
1451 				return ret;
1452 			}
1453 		}
1454 
1455 		ret = psp_bootloader_load_sysdrv(psp);
1456 		if (ret) {
1457 			DRM_ERROR("PSP load sysdrv failed!\n");
1458 			return ret;
1459 		}
1460 
1461 		ret = psp_bootloader_load_sos(psp);
1462 		if (ret) {
1463 			DRM_ERROR("PSP load sos failed!\n");
1464 			return ret;
1465 		}
1466 	}
1467 
1468 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
1469 	if (ret) {
1470 		DRM_ERROR("PSP create ring failed!\n");
1471 		return ret;
1472 	}
1473 
1474 	ret = psp_clear_vf_fw(psp);
1475 	if (ret) {
1476 		DRM_ERROR("PSP clear vf fw failed!\n");
1477 		return ret;
1478 	}
1479 
1480 	ret = psp_tmr_init(psp);
1481 	if (ret) {
1482 		DRM_ERROR("PSP tmr init failed!\n");
1483 		return ret;
1484 	}
1485 
1486 	/*
1487 	 * For ASICs with DF Cstate management centralized
1488 	 * in the PMFW, TMR setup should be performed after the PMFW
1489 	 * is loaded and before other non-psp firmware is loaded.
1490 	 */
1491 	if (psp->pmfw_centralized_cstate_management) {
1492 		ret = psp_load_smu_fw(psp);
1493 		if (ret)
1494 			return ret;
1495 	}
1496 
1497 	ret = psp_tmr_load(psp);
1498 	if (ret) {
1499 		DRM_ERROR("PSP load tmr failed!\n");
1500 		return ret;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
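/* Map an amdgpu ucode id to the corresponding PSP GFX firmware type. */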
1506 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1507 			   enum psp_gfx_fw_type *type)
1508 {
1509 	switch (ucode->ucode_id) {
1510 	case AMDGPU_UCODE_ID_SDMA0:
1511 		*type = GFX_FW_TYPE_SDMA0;
1512 		break;
1513 	case AMDGPU_UCODE_ID_SDMA1:
1514 		*type = GFX_FW_TYPE_SDMA1;
1515 		break;
1516 	case AMDGPU_UCODE_ID_SDMA2:
1517 		*type = GFX_FW_TYPE_SDMA2;
1518 		break;
1519 	case AMDGPU_UCODE_ID_SDMA3:
1520 		*type = GFX_FW_TYPE_SDMA3;
1521 		break;
1522 	case AMDGPU_UCODE_ID_SDMA4:
1523 		*type = GFX_FW_TYPE_SDMA4;
1524 		break;
1525 	case AMDGPU_UCODE_ID_SDMA5:
1526 		*type = GFX_FW_TYPE_SDMA5;
1527 		break;
1528 	case AMDGPU_UCODE_ID_SDMA6:
1529 		*type = GFX_FW_TYPE_SDMA6;
1530 		break;
1531 	case AMDGPU_UCODE_ID_SDMA7:
1532 		*type = GFX_FW_TYPE_SDMA7;
1533 		break;
1534 	case AMDGPU_UCODE_ID_CP_MES:
1535 		*type = GFX_FW_TYPE_CP_MES;
1536 		break;
1537 	case AMDGPU_UCODE_ID_CP_MES_DATA:
1538 		*type = GFX_FW_TYPE_MES_STACK;
1539 		break;
1540 	case AMDGPU_UCODE_ID_CP_CE:
1541 		*type = GFX_FW_TYPE_CP_CE;
1542 		break;
1543 	case AMDGPU_UCODE_ID_CP_PFP:
1544 		*type = GFX_FW_TYPE_CP_PFP;
1545 		break;
1546 	case AMDGPU_UCODE_ID_CP_ME:
1547 		*type = GFX_FW_TYPE_CP_ME;
1548 		break;
1549 	case AMDGPU_UCODE_ID_CP_MEC1:
1550 		*type = GFX_FW_TYPE_CP_MEC;
1551 		break;
1552 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1553 		*type = GFX_FW_TYPE_CP_MEC_ME1;
1554 		break;
1555 	case AMDGPU_UCODE_ID_CP_MEC2:
1556 		*type = GFX_FW_TYPE_CP_MEC;
1557 		break;
1558 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1559 		*type = GFX_FW_TYPE_CP_MEC_ME2;
1560 		break;
1561 	case AMDGPU_UCODE_ID_RLC_G:
1562 		*type = GFX_FW_TYPE_RLC_G;
1563 		break;
1564 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
1565 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
1566 		break;
1567 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
1568 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
1569 		break;
1570 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1571 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1572 		break;
1573 	case AMDGPU_UCODE_ID_SMC:
1574 		*type = GFX_FW_TYPE_SMU;
1575 		break;
1576 	case AMDGPU_UCODE_ID_UVD:
1577 		*type = GFX_FW_TYPE_UVD;
1578 		break;
1579 	case AMDGPU_UCODE_ID_UVD1:
1580 		*type = GFX_FW_TYPE_UVD1;
1581 		break;
1582 	case AMDGPU_UCODE_ID_VCE:
1583 		*type = GFX_FW_TYPE_VCE;
1584 		break;
1585 	case AMDGPU_UCODE_ID_VCN:
1586 		*type = GFX_FW_TYPE_VCN;
1587 		break;
1588 	case AMDGPU_UCODE_ID_VCN1:
1589 		*type = GFX_FW_TYPE_VCN1;
1590 		break;
1591 	case AMDGPU_UCODE_ID_DMCU_ERAM:
1592 		*type = GFX_FW_TYPE_DMCU_ERAM;
1593 		break;
1594 	case AMDGPU_UCODE_ID_DMCU_INTV:
1595 		*type = GFX_FW_TYPE_DMCU_ISR;
1596 		break;
1597 	case AMDGPU_UCODE_ID_VCN0_RAM:
1598 		*type = GFX_FW_TYPE_VCN0_RAM;
1599 		break;
1600 	case AMDGPU_UCODE_ID_VCN1_RAM:
1601 		*type = GFX_FW_TYPE_VCN1_RAM;
1602 		break;
1603 	case AMDGPU_UCODE_ID_DMCUB:
1604 		*type = GFX_FW_TYPE_DMUB;
1605 		break;
1606 	case AMDGPU_UCODE_ID_MAXIMUM:
1607 	default:
1608 		return -EINVAL;
1609 	}
1610 
1611 	return 0;
1612 }
1613 
1614 static void psp_print_fw_hdr(struct psp_context *psp,
1615 			     struct amdgpu_firmware_info *ucode)
1616 {
1617 	struct amdgpu_device *adev = psp->adev;
1618 	struct common_firmware_header *hdr;
1619 
1620 	switch (ucode->ucode_id) {
1621 	case AMDGPU_UCODE_ID_SDMA0:
1622 	case AMDGPU_UCODE_ID_SDMA1:
1623 	case AMDGPU_UCODE_ID_SDMA2:
1624 	case AMDGPU_UCODE_ID_SDMA3:
1625 	case AMDGPU_UCODE_ID_SDMA4:
1626 	case AMDGPU_UCODE_ID_SDMA5:
1627 	case AMDGPU_UCODE_ID_SDMA6:
1628 	case AMDGPU_UCODE_ID_SDMA7:
1629 		hdr = (struct common_firmware_header *)
1630 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
1631 		amdgpu_ucode_print_sdma_hdr(hdr);
1632 		break;
1633 	case AMDGPU_UCODE_ID_CP_CE:
1634 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
1635 		amdgpu_ucode_print_gfx_hdr(hdr);
1636 		break;
1637 	case AMDGPU_UCODE_ID_CP_PFP:
1638 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
1639 		amdgpu_ucode_print_gfx_hdr(hdr);
1640 		break;
1641 	case AMDGPU_UCODE_ID_CP_ME:
1642 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
1643 		amdgpu_ucode_print_gfx_hdr(hdr);
1644 		break;
1645 	case AMDGPU_UCODE_ID_CP_MEC1:
1646 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
1647 		amdgpu_ucode_print_gfx_hdr(hdr);
1648 		break;
1649 	case AMDGPU_UCODE_ID_RLC_G:
1650 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
1651 		amdgpu_ucode_print_rlc_hdr(hdr);
1652 		break;
1653 	case AMDGPU_UCODE_ID_SMC:
1654 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
1655 		amdgpu_ucode_print_smc_hdr(hdr);
1656 		break;
1657 	default:
1658 		break;
1659 	}
1660 }
1661 
1662 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
1663 				       struct psp_gfx_cmd_resp *cmd)
1664 {
1665 	int ret;
1666 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
1667 
1668 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
1669 
1670 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1671 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
1672 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
1673 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
1674 
1675 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
1676 	if (ret)
1677 		DRM_ERROR("Unknown firmware type\n");
1678 
1679 	return ret;
1680 }
1681 
1682 static int psp_execute_np_fw_load(struct psp_context *psp,
1683 			          struct amdgpu_firmware_info *ucode)
1684 {
1685 	int ret = 0;
1686 
1687 	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
1688 	if (ret)
1689 		return ret;
1690 
1691 	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
1692 				 psp->fence_buf_mc_addr);
1693 
1694 	return ret;
1695 }
1696 
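/*
 * Load the SMC firmware image through the PSP. Skipped under SRIOV or when
 * no SMC image is present; during a GPU reset with RAS support, the MP1
 * state is set to UNLOAD first so the firmware can be reloaded.
 */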
1697 static int psp_load_smu_fw(struct psp_context *psp)
1698 {
1699 	int ret;
1700 	struct amdgpu_device *adev = psp->adev;
1701 	struct amdgpu_firmware_info *ucode =
1702 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
1703 	struct amdgpu_ras *ras = psp->ras.ras;
1704 
1705 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
1706 		return 0;
1707 
1709 	if (adev->in_gpu_reset && ras && ras->supported) {
1710 		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
1711 		if (ret) {
1712 			DRM_WARN("Failed to set MP1 state prepare for reload\n");
1713 		}
1714 	}
1715 
1716 	ret = psp_execute_np_fw_load(psp, ucode);
1717 
1718 	if (ret)
1719 		DRM_ERROR("PSP load smu failed!\n");
1720 
1721 	return ret;
1722 }
1723 
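/*
 * Return true when a ucode should not be loaded through the PSP: the
 * firmware is absent, the SMC image is handled elsewhere, the IP is
 * skipped under SRIOV, or the MEC JT images when RLC autoload is enabled.
 */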
1724 static bool fw_load_skip_check(struct psp_context *psp,
1725 			       struct amdgpu_firmware_info *ucode)
1726 {
1727 	if (!ucode->fw)
1728 		return true;
1729 
1730 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1731 	    (psp_smu_reload_quirk(psp) ||
1732 	     psp->autoload_supported ||
1733 	     psp->pmfw_centralized_cstate_management))
1734 		return true;
1735 
1736 	if (amdgpu_sriov_vf(psp->adev) &&
1737 	   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
1738 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
1739 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
1740 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
1741 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
1742 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
1743 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
1744 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
1745 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
1746 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1747 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1748 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1749 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
1750 		/* skip ucode loading in SRIOV VF */
1751 		return true;
1752 
1753 	if (psp->autoload_supported &&
1754 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
1755 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
1756 		/* skip mec JT when autoload is enabled */
1757 		return true;
1758 
1759 	return false;
1760 }
1761 
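/*
 * Load all non-psp firmware images through the PSP, skipping the ones
 * fw_load_skip_check() filters out, and kick off RLC autoload once the
 * last GFX image has been submitted.
 */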
1762 static int psp_np_fw_load(struct psp_context *psp)
1763 {
1764 	int i, ret;
1765 	struct amdgpu_firmware_info *ucode;
1766 	struct amdgpu_device *adev = psp->adev;
1767 
1768 	if (psp->autoload_supported &&
1769 	    !psp->pmfw_centralized_cstate_management) {
1770 		ret = psp_load_smu_fw(psp);
1771 		if (ret)
1772 			return ret;
1773 	}
1774 
1775 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
1776 		ucode = &adev->firmware.ucode[i];
1777 
1778 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1779 		    !fw_load_skip_check(psp, ucode)) {
1780 			ret = psp_load_smu_fw(psp);
1781 			if (ret)
1782 				return ret;
1783 			continue;
1784 		}
1785 
1786 		if (fw_load_skip_check(psp, ucode))
1787 			continue;
1788 
1789 		if (psp->autoload_supported &&
1790 		    (adev->asic_type == CHIP_SIENNA_CICHLID ||
1791 		     adev->asic_type == CHIP_NAVY_FLOUNDER) &&
1792 		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
1793 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
1794 		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
1795 			/* PSP only receives one SDMA fw for sienna_cichlid,
1796 			 * as all four sdma fw are the same */
1797 			continue;
1798 
1799 		psp_print_fw_hdr(psp, ucode);
1800 
1801 		ret = psp_execute_np_fw_load(psp, ucode);
1802 		if (ret)
1803 			return ret;
1804 
1805 		/* Start rlc autoload after the psp has received all the gfx firmware */
1806 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
1807 		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
1808 			ret = psp_rlc_autoload_start(psp);
1809 			if (ret) {
1810 				DRM_ERROR("Failed to start rlc autoload\n");
1811 				return ret;
1812 			}
1813 		}
1814 	}
1815 
1816 	return 0;
1817 }
1818 
1819 static int psp_load_fw(struct amdgpu_device *adev)
1820 {
1821 	int ret;
1822 	struct psp_context *psp = &adev->psp;
1823 
1824 	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
1825 		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
1826 		goto skip_memalloc;
1827 	}
1828 
1829 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1830 	if (!psp->cmd)
1831 		return -ENOMEM;
1832 
1833 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
1834 					AMDGPU_GEM_DOMAIN_GTT,
1835 					&psp->fw_pri_bo,
1836 					&psp->fw_pri_mc_addr,
1837 					&psp->fw_pri_buf);
1838 	if (ret)
1839 		goto failed;
1840 
1841 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
1842 					AMDGPU_GEM_DOMAIN_VRAM,
1843 					&psp->fence_buf_bo,
1844 					&psp->fence_buf_mc_addr,
1845 					&psp->fence_buf);
1846 	if (ret)
1847 		goto failed;
1848 
1849 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
1850 				      AMDGPU_GEM_DOMAIN_VRAM,
1851 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1852 				      (void **)&psp->cmd_buf_mem);
1853 	if (ret)
1854 		goto failed;
1855 
1856 	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
1857 
1858 	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
1859 	if (ret) {
1860 		DRM_ERROR("PSP ring init failed!\n");
1861 		goto failed;
1862 	}
1863 
1864 skip_memalloc:
1865 	ret = psp_hw_start(psp);
1866 	if (ret)
1867 		goto failed;
1868 
1869 	ret = psp_np_fw_load(psp);
1870 	if (ret)
1871 		goto failed;
1872 
1873 	ret = psp_asd_load(psp);
1874 	if (ret) {
1875 		DRM_ERROR("PSP load asd failed!\n");
1876 		return ret;
1877 	}
1878 
1879 	if (psp->adev->psp.ta_fw) {
1880 		ret = psp_ras_initialize(psp);
1881 		if (ret)
1882 			dev_err(psp->adev->dev,
1883 					"RAS: Failed to initialize RAS\n");
1884 
1885 		ret = psp_hdcp_initialize(psp);
1886 		if (ret)
1887 			dev_err(psp->adev->dev,
1888 				"HDCP: Failed to initialize HDCP\n");
1889 
1890 		ret = psp_dtm_initialize(psp);
1891 		if (ret)
1892 			dev_err(psp->adev->dev,
1893 				"DTM: Failed to initialize DTM\n");
1894 	}
1895 
1896 	return 0;
1897 
1898 failed:
1899 	/*
1900 	 * all cleanup jobs (xgmi terminate, ras terminate,
1901 	 * ring destroy, cmd/fence/fw buffer destroy,
1902 	 * psp->cmd destroy) are delayed to psp_hw_fini
1903 	 */
1904 	return ret;
1905 }
1906 
1907 static int psp_hw_init(void *handle)
1908 {
1909 	int ret;
1910 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1911 
1912 	mutex_lock(&adev->firmware.mutex);
1913 	/*
1914 	 * This sequence is only used once, during hw_init; it is not
1915 	 * needed on resume.
1916 	 */
1917 	ret = amdgpu_ucode_init_bo(adev);
1918 	if (ret)
1919 		goto failed;
1920 
1921 	ret = psp_load_fw(adev);
1922 	if (ret) {
1923 		DRM_ERROR("PSP firmware loading failed\n");
1924 		goto failed;
1925 	}
1926 
1927 	mutex_unlock(&adev->firmware.mutex);
1928 	return 0;
1929 
1930 failed:
1931 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1932 	mutex_unlock(&adev->firmware.mutex);
1933 	return -EINVAL;
1934 }
1935 
1936 static int psp_hw_fini(void *handle)
1937 {
1938 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1939 	struct psp_context *psp = &adev->psp;
1940 	int ret;
1941 
1942 	if (psp->adev->psp.ta_fw) {
1943 		psp_ras_terminate(psp);
1944 		psp_dtm_terminate(psp);
1945 		psp_hdcp_terminate(psp);
1946 	}
1947 
1948 	psp_asd_unload(psp);
1949 	ret = psp_clear_vf_fw(psp);
1950 	if (ret) {
1951 		DRM_ERROR("PSP clear vf fw failed!\n");
1952 		return ret;
1953 	}
1954 
1955 	psp_tmr_terminate(psp);
1956 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
1957 
1958 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
1959 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
1960 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
1961 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
1962 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1963 			      (void **)&psp->cmd_buf_mem);
1964 
1965 	kfree(psp->cmd);
1966 	psp->cmd = NULL;
1967 
1968 	return 0;
1969 }
1970 
1971 static int psp_suspend(void *handle)
1972 {
1973 	int ret;
1974 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1975 	struct psp_context *psp = &adev->psp;
1976 
1977 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1978 	    psp->xgmi_context.initialized == 1) {
1979 		ret = psp_xgmi_terminate(psp);
1980 		if (ret) {
1981 			DRM_ERROR("Failed to terminate xgmi ta\n");
1982 			return ret;
1983 		}
1984 	}
1985 
1986 	if (psp->adev->psp.ta_fw) {
1987 		ret = psp_ras_terminate(psp);
1988 		if (ret) {
1989 			DRM_ERROR("Failed to terminate ras ta\n");
1990 			return ret;
1991 		}
1992 		ret = psp_hdcp_terminate(psp);
1993 		if (ret) {
1994 			DRM_ERROR("Failed to terminate hdcp ta\n");
1995 			return ret;
1996 		}
1997 		ret = psp_dtm_terminate(psp);
1998 		if (ret) {
1999 			DRM_ERROR("Failed to terminate dtm ta\n");
2000 			return ret;
2001 		}
2002 	}
2003 
2004 	ret = psp_asd_unload(psp);
2005 	if (ret) {
2006 		DRM_ERROR("Failed to unload asd\n");
2007 		return ret;
2008 	}
2009 
2010 	ret = psp_tmr_terminate(psp);
2011 	if (ret) {
2012 		DRM_ERROR("Failed to terminate tmr\n");
2013 		return ret;
2014 	}
2015 
2016 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2017 	if (ret) {
2018 		DRM_ERROR("PSP ring stop failed\n");
2019 		return ret;
2020 	}
2021 
2022 	return 0;
2023 }
2024 
2025 static int psp_resume(void *handle)
2026 {
2027 	int ret;
2028 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2029 	struct psp_context *psp = &adev->psp;
2030 
2031 	DRM_INFO("PSP is resuming...\n");
2032 
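	/* Process memory training before the PSP is brought back up and firmware is reloaded */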
2033 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2034 	if (ret) {
2035 		DRM_ERROR("Failed to process memory training!\n");
2036 		return ret;
2037 	}
2038 
2039 	mutex_lock(&adev->firmware.mutex);
2040 
2041 	ret = psp_hw_start(psp);
2042 	if (ret)
2043 		goto failed;
2044 
2045 	ret = psp_np_fw_load(psp);
2046 	if (ret)
2047 		goto failed;
2048 
2049 	ret = psp_asd_load(psp);
2050 	if (ret) {
2051 		DRM_ERROR("PSP load asd failed!\n");
2052 		goto failed;
2053 	}
2054 
2055 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2056 		ret = psp_xgmi_initialize(psp);
		/* Warn about an XGMI session initialization failure
		 * instead of stopping driver initialization
		 */
2060 		if (ret)
2061 			dev_err(psp->adev->dev,
2062 				"XGMI: Failed to initialize XGMI session\n");
2063 	}
2064 
2065 	if (psp->adev->psp.ta_fw) {
2066 		ret = psp_ras_initialize(psp);
2067 		if (ret)
2068 			dev_err(psp->adev->dev,
2069 					"RAS: Failed to initialize RAS\n");
2070 
2071 		ret = psp_hdcp_initialize(psp);
2072 		if (ret)
2073 			dev_err(psp->adev->dev,
2074 				"HDCP: Failed to initialize HDCP\n");
2075 
2076 		ret = psp_dtm_initialize(psp);
2077 		if (ret)
2078 			dev_err(psp->adev->dev,
2079 				"DTM: Failed to initialize DTM\n");
2080 	}
2081 
2082 	mutex_unlock(&adev->firmware.mutex);
2083 
2084 	return 0;
2085 
2086 failed:
2087 	DRM_ERROR("PSP resume failed\n");
2088 	mutex_unlock(&adev->firmware.mutex);
2089 	return ret;
2090 }
2091 
2092 int psp_gpu_reset(struct amdgpu_device *adev)
2093 {
2094 	int ret;
2095 
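	/* A PSP mode1 reset only applies when firmware is loaded through the PSP */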
2096 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
2097 		return 0;
2098 
2099 	mutex_lock(&adev->psp.mutex);
2100 	ret = psp_mode1_reset(&adev->psp);
2101 	mutex_unlock(&adev->psp.mutex);
2102 
2103 	return ret;
2104 }
2105 
2106 int psp_rlc_autoload_start(struct psp_context *psp)
2107 {
2108 	int ret;
2109 	struct psp_gfx_cmd_resp *cmd;
2110 
2111 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
2112 	if (!cmd)
2113 		return -ENOMEM;
2114 
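	/* Ask the PSP to start loading the remaining (RLC autoloaded) firmware images */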
2115 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
2116 
2117 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
2118 				 psp->fence_buf_mc_addr);
2119 	kfree(cmd);
2120 	return ret;
2121 }
2122 
2123 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
2124 			uint64_t cmd_gpu_addr, int cmd_size)
2125 {
2126 	struct amdgpu_firmware_info ucode = {0};
2127 
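	/*
	 * Wrap the VCN indirect command buffer in a firmware info struct so
	 * it can be handed to the PSP like any other non-PSP firmware image.
	 */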
2128 	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
2129 		AMDGPU_UCODE_ID_VCN0_RAM;
2130 	ucode.mc_addr = cmd_gpu_addr;
2131 	ucode.ucode_size = cmd_size;
2132 
2133 	return psp_execute_np_fw_load(&adev->psp, &ucode);
2134 }
2135 
2136 int psp_ring_cmd_submit(struct psp_context *psp,
2137 			uint64_t cmd_buf_mc_addr,
2138 			uint64_t fence_mc_addr,
2139 			int index)
2140 {
2141 	unsigned int psp_write_ptr_reg = 0;
2142 	struct psp_gfx_rb_frame *write_frame;
2143 	struct psp_ring *ring = &psp->km_ring;
2144 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
2145 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
2146 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
2147 	struct amdgpu_device *adev = psp->adev;
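	/* The PSP ring write pointer and RB frame size are expressed in DWORDs */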
2148 	uint32_t ring_size_dw = ring->ring_size / 4;
2149 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
2150 
	/* KM (GPCOM) prepare write pointer */
	psp_write_ptr_reg = psp_ring_get_wptr(psp);

	/*
	 * Update the KM RB frame pointer to the new frame:
	 * write_frame advances by the size of an rb_frame in bytes,
	 * while psp_write_ptr_reg advances by its size in DWORDs.
	 */
2157 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
2158 		write_frame = ring_buffer_start;
2159 	else
2160 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
2161 	/* Check invalid write_frame ptr address */
2162 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
2163 		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
2164 			  ring_buffer_start, ring_buffer_end, write_frame);
2165 		DRM_ERROR("write_frame is pointing to address out of bounds\n");
2166 		return -EINVAL;
2167 	}
2168 
2169 	/* Initialize KM RB frame */
2170 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
2171 
2172 	/* Update KM RB frame */
2173 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2174 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2175 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2176 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2177 	write_frame->fence_value = index;
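	/* Flush HDP so the RB frame written by the CPU is visible to the PSP */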
2178 	amdgpu_asic_flush_hdp(adev, NULL);
2179 
2180 	/* Update the write Pointer in DWORDs */
2181 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2182 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
2183 	return 0;
2184 }
2185 
2186 int psp_init_asd_microcode(struct psp_context *psp,
2187 			   const char *chip_name)
2188 {
2189 	struct amdgpu_device *adev = psp->adev;
2190 	char fw_name[30];
2191 	const struct psp_firmware_header_v1_0 *asd_hdr;
2192 	int err = 0;
2193 
2194 	if (!chip_name) {
2195 		dev_err(adev->dev, "invalid chip name for asd microcode\n");
2196 		return -EINVAL;
2197 	}
2198 
2199 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
2200 	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
2201 	if (err)
2202 		goto out;
2203 
2204 	err = amdgpu_ucode_validate(adev->psp.asd_fw);
2205 	if (err)
2206 		goto out;
2207 
2208 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
2209 	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
2210 	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
2211 	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
2212 	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
2213 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
2214 	return 0;
2215 out:
	dev_err(adev->dev, "failed to initialize asd microcode\n");
2217 	release_firmware(adev->psp.asd_fw);
2218 	adev->psp.asd_fw = NULL;
2219 	return err;
2220 }
2221 
2222 int psp_init_sos_microcode(struct psp_context *psp,
2223 			   const char *chip_name)
2224 {
2225 	struct amdgpu_device *adev = psp->adev;
2226 	char fw_name[30];
2227 	const struct psp_firmware_header_v1_0 *sos_hdr;
2228 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
2229 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
2230 	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
2231 	int err = 0;
2232 
2233 	if (!chip_name) {
2234 		dev_err(adev->dev, "invalid chip name for sos microcode\n");
2235 		return -EINVAL;
2236 	}
2237 
2238 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
2239 	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
2240 	if (err)
2241 		goto out;
2242 
2243 	err = amdgpu_ucode_validate(adev->psp.sos_fw);
2244 	if (err)
2245 		goto out;
2246 
2247 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
2248 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
2249 
2250 	switch (sos_hdr->header.header_version_major) {
2251 	case 1:
2252 		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
2253 		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
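		/*
		 * The ucode array starts with the SYS_DRV image, so the SOS
		 * offset also gives the SYS_DRV size.
		 */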
2254 		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
2255 		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
2256 		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
2257 				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
2258 		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2259 				le32_to_cpu(sos_hdr->sos_offset_bytes);
2260 		if (sos_hdr->header.header_version_minor == 1) {
2261 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
2262 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
2263 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2264 					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
2265 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
2266 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2267 					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
2268 		}
2269 		if (sos_hdr->header.header_version_minor == 2) {
2270 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
2271 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
2272 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2273 						    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
2274 		}
2275 		if (sos_hdr->header.header_version_minor == 3) {
2276 			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
2277 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
2278 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2279 				le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
2280 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
2281 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2282 				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
2283 			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
2284 			adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2285 				le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
2286 		}
2287 		break;
2288 	default:
2289 		dev_err(adev->dev,
2290 			"unsupported psp sos firmware\n");
2291 		err = -EINVAL;
2292 		goto out;
2293 	}
2294 
2295 	return 0;
2296 out:
2297 	dev_err(adev->dev,
2298 		"failed to init sos firmware\n");
2299 	release_firmware(adev->psp.sos_fw);
2300 	adev->psp.sos_fw = NULL;
2301 
2302 	return err;
2303 }
2304 
2305 int parse_ta_bin_descriptor(struct psp_context *psp,
2306 			    const struct ta_fw_bin_desc *desc,
2307 			    const struct ta_firmware_header_v2_0 *ta_hdr)
2308 {
2309 	uint8_t *ucode_start_addr  = NULL;
2310 
2311 	if (!psp || !desc || !ta_hdr)
2312 		return -EINVAL;
2313 
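	/*
	 * Each packed TA payload lives in the shared ucode array at the
	 * offset given by its bin descriptor.
	 */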
2314 	ucode_start_addr  = (uint8_t *)ta_hdr +
2315 			    le32_to_cpu(desc->offset_bytes) +
2316 			    le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
2317 
2318 	switch (desc->fw_type) {
2319 	case TA_FW_TYPE_PSP_ASD:
		psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
		psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
		psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
		psp->asd_start_addr        = ucode_start_addr;
2324 		break;
2325 	case TA_FW_TYPE_PSP_XGMI:
2326 		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
2327 		psp->ta_xgmi_ucode_size    = le32_to_cpu(desc->size_bytes);
2328 		psp->ta_xgmi_start_addr    = ucode_start_addr;
2329 		break;
2330 	case TA_FW_TYPE_PSP_RAS:
2331 		psp->ta_ras_ucode_version  = le32_to_cpu(desc->fw_version);
2332 		psp->ta_ras_ucode_size     = le32_to_cpu(desc->size_bytes);
2333 		psp->ta_ras_start_addr     = ucode_start_addr;
2334 		break;
2335 	case TA_FW_TYPE_PSP_HDCP:
2336 		psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
2337 		psp->ta_hdcp_ucode_size    = le32_to_cpu(desc->size_bytes);
2338 		psp->ta_hdcp_start_addr    = ucode_start_addr;
2339 		break;
2340 	case TA_FW_TYPE_PSP_DTM:
2341 		psp->ta_dtm_ucode_version  = le32_to_cpu(desc->fw_version);
2342 		psp->ta_dtm_ucode_size     = le32_to_cpu(desc->size_bytes);
2343 		psp->ta_dtm_start_addr     = ucode_start_addr;
2344 		break;
2345 	default:
2346 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
2347 		break;
2348 	}
2349 
2350 	return 0;
2351 }
2352 
2353 int psp_init_ta_microcode(struct psp_context *psp,
2354 			  const char *chip_name)
2355 {
2356 	struct amdgpu_device *adev = psp->adev;
2357 	char fw_name[30];
2358 	const struct ta_firmware_header_v2_0 *ta_hdr;
2359 	int err = 0;
2360 	int ta_index = 0;
2361 
2362 	if (!chip_name) {
2363 		dev_err(adev->dev, "invalid chip name for ta microcode\n");
2364 		return -EINVAL;
2365 	}
2366 
2367 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
2368 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
2369 	if (err)
2370 		goto out;
2371 
2372 	err = amdgpu_ucode_validate(adev->psp.ta_fw);
2373 	if (err)
2374 		goto out;
2375 
2376 	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
2377 
2378 	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
2379 		dev_err(adev->dev, "unsupported TA header version\n");
2380 		err = -EINVAL;
2381 		goto out;
2382 	}
2383 
2384 	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
2385 		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
2386 		err = -EINVAL;
2387 		goto out;
2388 	}
2389 
2390 	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
2391 		err = parse_ta_bin_descriptor(psp,
2392 					      &ta_hdr->ta_fw_bin[ta_index],
2393 					      ta_hdr);
2394 		if (err)
2395 			goto out;
2396 	}
2397 
2398 	return 0;
2399 out:
	dev_err(adev->dev, "failed to initialize ta microcode\n");
2401 	release_firmware(adev->psp.ta_fw);
2402 	adev->psp.ta_fw = NULL;
2403 	return err;
2404 }
2405 
2406 static int psp_set_clockgating_state(void *handle,
2407 				     enum amd_clockgating_state state)
2408 {
2409 	return 0;
2410 }
2411 
2412 static int psp_set_powergating_state(void *handle,
2413 				     enum amd_powergating_state state)
2414 {
2415 	return 0;
2416 }
2417 
2418 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
2419 					 struct device_attribute *attr,
2420 					 char *buf)
2421 {
2422 	struct drm_device *ddev = dev_get_drvdata(dev);
2423 	struct amdgpu_device *adev = ddev->dev_private;
2424 	uint32_t fw_ver;
2425 	int ret;
2426 
2427 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.\n");
2429 		return -EBUSY;
2430 	}
2431 
2432 	mutex_lock(&adev->psp.mutex);
2433 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
2434 	mutex_unlock(&adev->psp.mutex);
2435 
2436 	if (ret) {
		DRM_ERROR("Failed to read USBC PD FW, err = %d\n", ret);
2438 		return ret;
2439 	}
2440 
2441 	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
2442 }
2443 
2444 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
2445 						       struct device_attribute *attr,
2446 						       const char *buf,
2447 						       size_t count)
2448 {
2449 	struct drm_device *ddev = dev_get_drvdata(dev);
2450 	struct amdgpu_device *adev = ddev->dev_private;
2451 	void *cpu_addr;
2452 	dma_addr_t dma_addr;
2453 	int ret;
2454 	char fw_name[100];
2455 	const struct firmware *usbc_pd_fw;
2456 
2457 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.\n");
2459 		return -EBUSY;
2460 	}
2461 
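	/* The value written is the PD firmware file name, resolved under the amdgpu/ firmware directory */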
2462 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
2463 	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
2464 	if (ret)
2465 		goto fail;
2466 
	/* We need contiguous physical memory for the PSP to access the FW */
	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
	if (!cpu_addr) {
		ret = -ENOMEM;
		goto rel_fw;
	}
2473 
2474 	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
2475 
2476 	/*
2477 	 * x86 specific workaround.
2478 	 * Without it the buffer is invisible in PSP.
2479 	 *
2480 	 * TODO Remove once PSP starts snooping CPU cache
2481 	 */
2482 #ifdef CONFIG_X86
2483 	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
2484 #endif
2485 
2486 	mutex_lock(&adev->psp.mutex);
2487 	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
2488 	mutex_unlock(&adev->psp.mutex);
2489 
rel_buf:
	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
rel_fw:
	release_firmware(usbc_pd_fw);
2493 
2494 fail:
2495 	if (ret) {
		DRM_ERROR("Failed to load USBC PD FW, err = %d\n", ret);
2497 		return ret;
2498 	}
2499 
2500 	return count;
2501 }
2502 
2503 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
2504 		   psp_usbc_pd_fw_sysfs_read,
2505 		   psp_usbc_pd_fw_sysfs_write);
2506 
2507 
2508 
2509 const struct amd_ip_funcs psp_ip_funcs = {
2510 	.name = "psp",
2511 	.early_init = psp_early_init,
2512 	.late_init = NULL,
2513 	.sw_init = psp_sw_init,
2514 	.sw_fini = psp_sw_fini,
2515 	.hw_init = psp_hw_init,
2516 	.hw_fini = psp_hw_fini,
2517 	.suspend = psp_suspend,
2518 	.resume = psp_resume,
2519 	.is_idle = NULL,
2520 	.check_soft_reset = NULL,
2521 	.wait_for_idle = NULL,
2522 	.soft_reset = NULL,
2523 	.set_clockgating_state = psp_set_clockgating_state,
2524 	.set_powergating_state = psp_set_powergating_state,
2525 };
2526 
2527 static int psp_sysfs_init(struct amdgpu_device *adev)
2528 {
2529 	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
2530 
2531 	if (ret)
		DRM_ERROR("Failed to create USBC PD FW control file!\n");
2533 
2534 	return ret;
2535 }
2536 
2537 static void psp_sysfs_fini(struct amdgpu_device *adev)
2538 {
2539 	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
2540 }
2541 
2542 const struct amdgpu_ip_block_version psp_v3_1_ip_block =
2543 {
2544 	.type = AMD_IP_BLOCK_TYPE_PSP,
2545 	.major = 3,
2546 	.minor = 1,
2547 	.rev = 0,
2548 	.funcs = &psp_ip_funcs,
2549 };
2550 
2551 const struct amdgpu_ip_block_version psp_v10_0_ip_block =
2552 {
2553 	.type = AMD_IP_BLOCK_TYPE_PSP,
2554 	.major = 10,
2555 	.minor = 0,
2556 	.rev = 0,
2557 	.funcs = &psp_ip_funcs,
2558 };
2559 
2560 const struct amdgpu_ip_block_version psp_v11_0_ip_block =
2561 {
2562 	.type = AMD_IP_BLOCK_TYPE_PSP,
2563 	.major = 11,
2564 	.minor = 0,
2565 	.rev = 0,
2566 	.funcs = &psp_ip_funcs,
2567 };
2568 
2569 const struct amdgpu_ip_block_version psp_v12_0_ip_block =
2570 {
2571 	.type = AMD_IP_BLOCK_TYPE_PSP,
2572 	.major = 12,
2573 	.minor = 0,
2574 	.rev = 0,
2575 	.funcs = &psp_ip_funcs,
2576 };
2577