1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Author: Huang Rui
23  *
24  */
25 
26 #include <linux/firmware.h>
27 #include <linux/dma-mapping.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "soc15_common.h"
33 #include "psp_v3_1.h"
34 #include "psp_v10_0.h"
35 #include "psp_v11_0.h"
36 #include "psp_v12_0.h"
37 
38 #include "amdgpu_ras.h"
39 
40 static int psp_sysfs_init(struct amdgpu_device *adev);
41 static void psp_sysfs_fini(struct amdgpu_device *adev);
42 
43 static int psp_load_smu_fw(struct psp_context *psp);
44 
/*
 * Because DF Cstate management is centralized in the PMFW, the firmware
 * loading sequence is updated as below:
48  *   - Load KDB
49  *   - Load SYS_DRV
50  *   - Load tOS
51  *   - Load PMFW
52  *   - Setup TMR
53  *   - Load other non-psp fw
54  *   - Load ASD
55  *   - Load XGMI/RAS/HDCP/DTM TA if any
56  *
57  * This new sequence is required for
58  *   - Arcturus
59  *   - Navi12 and onwards
60  */
61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
62 {
63 	struct amdgpu_device *adev = psp->adev;
64 
65 	psp->pmfw_centralized_cstate_management = false;
66 
67 	if (amdgpu_sriov_vf(adev))
68 		return;
69 
70 	if (adev->flags & AMD_IS_APU)
71 		return;
72 
73 	if ((adev->asic_type == CHIP_ARCTURUS) ||
74 	    (adev->asic_type >= CHIP_NAVI12))
75 		psp->pmfw_centralized_cstate_management = true;
76 }
77 
78 static int psp_early_init(void *handle)
79 {
80 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
81 	struct psp_context *psp = &adev->psp;
82 
83 	switch (adev->asic_type) {
84 	case CHIP_VEGA10:
85 	case CHIP_VEGA12:
86 		psp_v3_1_set_psp_funcs(psp);
87 		psp->autoload_supported = false;
88 		break;
89 	case CHIP_RAVEN:
90 		psp_v10_0_set_psp_funcs(psp);
91 		psp->autoload_supported = false;
92 		break;
93 	case CHIP_VEGA20:
94 	case CHIP_ARCTURUS:
95 		psp_v11_0_set_psp_funcs(psp);
96 		psp->autoload_supported = false;
97 		break;
98 	case CHIP_NAVI10:
99 	case CHIP_NAVI14:
100 	case CHIP_NAVI12:
101 		psp_v11_0_set_psp_funcs(psp);
102 		psp->autoload_supported = true;
103 		break;
104 	case CHIP_RENOIR:
105 		psp_v12_0_set_psp_funcs(psp);
106 		break;
107 	default:
108 		return -EINVAL;
109 	}
110 
111 	psp->adev = adev;
112 
113 	psp_check_pmfw_centralized_cstate_management(psp);
114 
115 	return 0;
116 }
117 
118 static int psp_sw_init(void *handle)
119 {
120 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
121 	struct psp_context *psp = &adev->psp;
122 	int ret;
123 
124 	ret = psp_init_microcode(psp);
125 	if (ret) {
126 		DRM_ERROR("Failed to load psp firmware!\n");
127 		return ret;
128 	}
129 
130 	ret = psp_mem_training_init(psp);
131 	if (ret) {
132 		DRM_ERROR("Failed to initialize memory training!\n");
133 		return ret;
134 	}
135 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
136 	if (ret) {
137 		DRM_ERROR("Failed to process memory training!\n");
138 		return ret;
139 	}
140 
	if (adev->asic_type == CHIP_NAVI10) {
		ret = psp_sysfs_init(adev);
		if (ret)
			return ret;
	}
147 
148 	return 0;
149 }
150 
151 static int psp_sw_fini(void *handle)
152 {
153 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
154 
155 	psp_mem_training_fini(&adev->psp);
156 	release_firmware(adev->psp.sos_fw);
157 	adev->psp.sos_fw = NULL;
158 	release_firmware(adev->psp.asd_fw);
159 	adev->psp.asd_fw = NULL;
160 	if (adev->psp.ta_fw) {
161 		release_firmware(adev->psp.ta_fw);
162 		adev->psp.ta_fw = NULL;
163 	}
164 
165 	if (adev->asic_type == CHIP_NAVI10)
166 		psp_sysfs_fini(adev);
167 
168 	return 0;
169 }
170 
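/*
 * Poll a PSP register, with a 1 us delay per iteration and up to
 * adev->usec_timeout iterations, until (val & mask) == reg_val or,
 * when check_changed is set, until the value differs from reg_val.
 * Returns 0 on success and -ETIME on timeout.
 */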
171 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
172 		 uint32_t reg_val, uint32_t mask, bool check_changed)
173 {
174 	uint32_t val;
175 	int i;
176 	struct amdgpu_device *adev = psp->adev;
177 
178 	for (i = 0; i < adev->usec_timeout; i++) {
179 		val = RREG32(reg_index);
180 		if (check_changed) {
181 			if (val != reg_val)
182 				return 0;
183 		} else {
184 			if ((val & mask) == reg_val)
185 				return 0;
186 		}
187 		udelay(1);
188 	}
189 
190 	return -ETIME;
191 }
192 
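/*
 * Copy a GFX command into the PSP command buffer, submit it on the KM
 * ring and poll the fence buffer (up to 2000 iterations of msleep(1))
 * for completion.  A RAS interrupt aborts the wait, and a
 * TEE_ERROR_NOT_SUPPORTED response is tolerated under SR-IOV.  The
 * session id and TMR address from the response are passed back to the
 * caller.
 */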
193 static int
194 psp_cmd_submit_buf(struct psp_context *psp,
195 		   struct amdgpu_firmware_info *ucode,
196 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
197 {
198 	int ret;
199 	int index;
200 	int timeout = 2000;
201 	bool ras_intr = false;
202 	bool skip_unsupport = false;
203 
204 	mutex_lock(&psp->mutex);
205 
206 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
207 
208 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
209 
210 	index = atomic_inc_return(&psp->fence_value);
211 	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
212 	if (ret) {
213 		atomic_dec(&psp->fence_value);
214 		mutex_unlock(&psp->mutex);
215 		return ret;
216 	}
217 
218 	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
219 	while (*((unsigned int *)psp->fence_buf) != index) {
220 		if (--timeout == 0)
221 			break;
		/*
		 * Don't keep waiting for the timeout when err_event_athub
		 * occurs: the GPU reset thread has already been triggered
		 * and the locked resources must be released for the PSP
		 * resume sequence.
		 */
227 		ras_intr = amdgpu_ras_intr_triggered();
228 		if (ras_intr)
229 			break;
230 		msleep(1);
231 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
232 	}
233 
234 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
235 	skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
236 
	/* In some cases the PSP response status is not 0 even though there
	 * was no problem while the command was submitted: some versions of
	 * the PSP firmware don't write 0 to that field.
	 * So only print a warning instead of an error here, and don't return
	 * -EINVAL, to avoid breaking hw_init during PSP initialization.
	 */
244 	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
245 		if (ucode)
246 			DRM_WARN("failed to load ucode id (%d) ",
247 				  ucode->ucode_id);
248 		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
249 			 psp->cmd_buf_mem->cmd_id,
250 			 psp->cmd_buf_mem->resp.status);
251 		if (!timeout) {
252 			mutex_unlock(&psp->mutex);
253 			return -EINVAL;
254 		}
255 	}
256 
257 	/* get xGMI session id from response buffer */
258 	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;
259 
260 	if (ucode) {
261 		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
262 		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
263 	}
264 	mutex_unlock(&psp->mutex);
265 
266 	return ret;
267 }
268 
269 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
270 				 struct psp_gfx_cmd_resp *cmd,
271 				 uint64_t tmr_mc, uint32_t size)
272 {
273 	if (amdgpu_sriov_vf(psp->adev))
274 		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
275 	else
276 		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
277 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
278 	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
279 	cmd->cmd.cmd_setup_tmr.buf_size = size;
280 }
281 
282 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
283 				      uint64_t pri_buf_mc, uint32_t size)
284 {
285 	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
286 	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
287 	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
288 	cmd->cmd.cmd_load_toc.toc_size = size;
289 }
290 
/* Issue the LOAD TOC cmd so the PSP parses the TOC and calculates the TMR size needed */
292 static int psp_load_toc(struct psp_context *psp,
293 			uint32_t *tmr_size)
294 {
295 	int ret;
296 	struct psp_gfx_cmd_resp *cmd;
297 
298 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
299 	if (!cmd)
300 		return -ENOMEM;
301 	/* Copy toc to psp firmware private buffer */
302 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
303 	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
304 
305 	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
306 
307 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
308 				 psp->fence_buf_mc_addr);
309 	if (!ret)
310 		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
311 	kfree(cmd);
312 	return ret;
313 }
314 
315 /* Set up Trusted Memory Region */
316 static int psp_tmr_init(struct psp_context *psp)
317 {
318 	int ret;
	uint32_t tmr_size;
320 	void *tmr_buf;
321 	void **pptr;
322 
	/*
	 * The HW engineers prefer the TMR address to be "naturally aligned",
	 * i.e. the start address is an integer multiple of the TMR size.
	 *
	 * Note: this memory needs to stay reserved until the driver
	 * is uninitialized.
	 */
330 	tmr_size = PSP_TMR_SIZE;
331 
	/* For ASICs that support RLC autoload, the PSP will parse the TOC
	 * and calculate the total size of TMR needed
	 */
334 	if (!amdgpu_sriov_vf(psp->adev) &&
335 	    psp->toc_start_addr &&
336 	    psp->toc_bin_size &&
337 	    psp->fw_pri_buf) {
338 		ret = psp_load_toc(psp, &tmr_size);
339 		if (ret) {
340 			DRM_ERROR("Failed to load toc\n");
341 			return ret;
342 		}
343 	}
344 
345 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
346 	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
347 				      AMDGPU_GEM_DOMAIN_VRAM,
348 				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
349 
350 	return ret;
351 }
352 
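/*
 * Send the SETUP_TMR command (SETUP_VMR under SR-IOV) to hand the
 * reserved trusted memory region over to the PSP.
 */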
353 static int psp_tmr_load(struct psp_context *psp)
354 {
355 	int ret;
356 	struct psp_gfx_cmd_resp *cmd;
357 
358 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
359 	if (!cmd)
360 		return -ENOMEM;
361 
362 	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
363 			     amdgpu_bo_size(psp->tmr_bo));
364 	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
365 		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
366 
367 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
368 				 psp->fence_buf_mc_addr);
369 
370 	kfree(cmd);
371 
372 	return ret;
373 }
374 
375 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
376 				uint64_t asd_mc, uint32_t size)
377 {
378 	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
379 	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
380 	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
381 	cmd->cmd.cmd_load_ta.app_len = size;
382 
383 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
384 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
385 	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
386 }
387 
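/*
 * Copy the ASD binary into the PSP firmware private buffer and ask the
 * PSP to load it.  On success the returned session id is cached in the
 * ASD context.
 */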
388 static int psp_asd_load(struct psp_context *psp)
389 {
390 	int ret;
391 	struct psp_gfx_cmd_resp *cmd;
392 
	/* If the PSP version doesn't match the ASD version, ASD loading fails.
	 * Bypass it under SR-IOV as a workaround for now.
	 * TODO: add a version check to make this path common.
	 */
397 	if (amdgpu_sriov_vf(psp->adev))
398 		return 0;
399 
400 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
401 	if (!cmd)
402 		return -ENOMEM;
403 
404 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
405 	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
406 
407 	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
408 				  psp->asd_ucode_size);
409 
410 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
411 				 psp->fence_buf_mc_addr);
412 	if (!ret) {
413 		psp->asd_context.asd_initialized = true;
414 		psp->asd_context.session_id = cmd->resp.session_id;
415 	}
416 
417 	kfree(cmd);
418 
419 	return ret;
420 }
421 
422 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
423 				       uint32_t session_id)
424 {
425 	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
426 	cmd->cmd.cmd_unload_ta.session_id = session_id;
427 }
428 
429 static int psp_asd_unload(struct psp_context *psp)
430 {
431 	int ret;
432 	struct psp_gfx_cmd_resp *cmd;
433 
434 	if (amdgpu_sriov_vf(psp->adev))
435 		return 0;
436 
437 	if (!psp->asd_context.asd_initialized)
438 		return 0;
439 
440 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
441 	if (!cmd)
442 		return -ENOMEM;
443 
444 	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
445 
446 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
447 				 psp->fence_buf_mc_addr);
448 	if (!ret)
449 		psp->asd_context.asd_initialized = false;
450 
451 	kfree(cmd);
452 
453 	return ret;
454 }
455 
456 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
457 		uint32_t id, uint32_t value)
458 {
459 	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
460 	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
461 	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
462 }
463 
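/*
 * Ask the PSP to program the given register (from the psp_reg_prog_id
 * list) with the supplied value on behalf of the driver.
 */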
464 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
465 		uint32_t value)
466 {
467 	struct psp_gfx_cmd_resp *cmd = NULL;
468 	int ret = 0;
469 
470 	if (reg >= PSP_REG_LAST)
471 		return -EINVAL;
472 
473 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
474 	if (!cmd)
475 		return -ENOMEM;
476 
477 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
478 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
479 
480 	kfree(cmd);
481 	return ret;
482 }
483 
484 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
485 				     uint64_t ta_bin_mc,
486 				     uint32_t ta_bin_size,
487 				     uint64_t ta_shared_mc,
488 				     uint32_t ta_shared_size)
489 {
490 	cmd->cmd_id 				= GFX_CMD_ID_LOAD_TA;
491 	cmd->cmd.cmd_load_ta.app_phy_addr_lo 	= lower_32_bits(ta_bin_mc);
492 	cmd->cmd.cmd_load_ta.app_phy_addr_hi 	= upper_32_bits(ta_bin_mc);
493 	cmd->cmd.cmd_load_ta.app_len 		= ta_bin_size;
494 
495 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
496 	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
497 	cmd->cmd.cmd_load_ta.cmd_buf_len 	 = ta_shared_size;
498 }
499 
500 static int psp_xgmi_init_shared_buf(struct psp_context *psp)
501 {
502 	int ret;
503 
504 	/*
505 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
506 	 * physical) for xgmi ta <-> Driver
507 	 */
508 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
509 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
510 				      &psp->xgmi_context.xgmi_shared_bo,
511 				      &psp->xgmi_context.xgmi_shared_mc_addr,
512 				      &psp->xgmi_context.xgmi_shared_buf);
513 
514 	return ret;
515 }
516 
517 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
518 				       uint32_t ta_cmd_id,
519 				       uint32_t session_id)
520 {
521 	cmd->cmd_id 				= GFX_CMD_ID_INVOKE_CMD;
522 	cmd->cmd.cmd_invoke_cmd.session_id 	= session_id;
523 	cmd->cmd.cmd_invoke_cmd.ta_cmd_id 	= ta_cmd_id;
524 }
525 
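/*
 * Submit an INVOKE_CMD for the given TA session.  The TA-specific
 * command id is carried in the GFX command; input/output payloads
 * travel through the TA's shared buffer.
 */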
526 int psp_ta_invoke(struct psp_context *psp,
527 		  uint32_t ta_cmd_id,
528 		  uint32_t session_id)
529 {
530 	int ret;
531 	struct psp_gfx_cmd_resp *cmd;
532 
533 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
534 	if (!cmd)
535 		return -ENOMEM;
536 
537 	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
538 
539 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
540 				 psp->fence_buf_mc_addr);
541 
542 	kfree(cmd);
543 
544 	return ret;
545 }
546 
547 static int psp_xgmi_load(struct psp_context *psp)
548 {
549 	int ret;
550 	struct psp_gfx_cmd_resp *cmd;
551 
552 	/*
553 	 * TODO: bypass the loading in sriov for now
554 	 */
555 
556 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
557 	if (!cmd)
558 		return -ENOMEM;
559 
560 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
561 	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
562 
563 	psp_prep_ta_load_cmd_buf(cmd,
564 				 psp->fw_pri_mc_addr,
565 				 psp->ta_xgmi_ucode_size,
566 				 psp->xgmi_context.xgmi_shared_mc_addr,
567 				 PSP_XGMI_SHARED_MEM_SIZE);
568 
569 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
570 				 psp->fence_buf_mc_addr);
571 
572 	if (!ret) {
573 		psp->xgmi_context.initialized = 1;
574 		psp->xgmi_context.session_id = cmd->resp.session_id;
575 	}
576 
577 	kfree(cmd);
578 
579 	return ret;
580 }
581 
582 static int psp_xgmi_unload(struct psp_context *psp)
583 {
584 	int ret;
585 	struct psp_gfx_cmd_resp *cmd;
586 	struct amdgpu_device *adev = psp->adev;
587 
588 	/* XGMI TA unload currently is not supported on Arcturus */
589 	if (adev->asic_type == CHIP_ARCTURUS)
590 		return 0;
591 
592 	/*
593 	 * TODO: bypass the unloading in sriov for now
594 	 */
595 
596 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
597 	if (!cmd)
598 		return -ENOMEM;
599 
600 	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
601 
602 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
603 				 psp->fence_buf_mc_addr);
604 
605 	kfree(cmd);
606 
607 	return ret;
608 }
609 
610 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
611 {
612 	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
613 }
614 
615 int psp_xgmi_terminate(struct psp_context *psp)
616 {
617 	int ret;
618 
619 	if (!psp->xgmi_context.initialized)
620 		return 0;
621 
622 	ret = psp_xgmi_unload(psp);
623 	if (ret)
624 		return ret;
625 
626 	psp->xgmi_context.initialized = 0;
627 
628 	/* free xgmi shared memory */
629 	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
630 			&psp->xgmi_context.xgmi_shared_mc_addr,
631 			&psp->xgmi_context.xgmi_shared_buf);
632 
633 	return 0;
634 }
635 
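/*
 * Bring up the XGMI TA: allocate the shared buffer if needed, load the
 * TA binary and send TA_COMMAND_XGMI__INITIALIZE to start the session.
 */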
636 int psp_xgmi_initialize(struct psp_context *psp)
637 {
638 	struct ta_xgmi_shared_memory *xgmi_cmd;
639 	int ret;
640 
641 	if (!psp->adev->psp.ta_fw ||
642 	    !psp->adev->psp.ta_xgmi_ucode_size ||
643 	    !psp->adev->psp.ta_xgmi_start_addr)
644 		return -ENOENT;
645 
646 	if (!psp->xgmi_context.initialized) {
647 		ret = psp_xgmi_init_shared_buf(psp);
648 		if (ret)
649 			return ret;
650 	}
651 
652 	/* Load XGMI TA */
653 	ret = psp_xgmi_load(psp);
654 	if (ret)
655 		return ret;
656 
657 	/* Initialize XGMI session */
658 	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
659 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
660 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
661 
662 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
663 
664 	return ret;
665 }
666 
667 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
668 {
669 	struct ta_xgmi_shared_memory *xgmi_cmd;
670 	int ret;
671 
	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
673 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
674 
675 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
676 
677 	/* Invoke xgmi ta to get hive id */
678 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
679 	if (ret)
680 		return ret;
681 
682 	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
683 
684 	return 0;
685 }
686 
687 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
688 {
689 	struct ta_xgmi_shared_memory *xgmi_cmd;
690 	int ret;
691 
	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
693 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
694 
695 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
696 
697 	/* Invoke xgmi ta to get the node id */
698 	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
699 	if (ret)
700 		return ret;
701 
702 	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
703 
704 	return 0;
705 }
706 
707 int psp_xgmi_get_topology_info(struct psp_context *psp,
708 			       int number_devices,
709 			       struct psp_xgmi_topology_info *topology)
710 {
711 	struct ta_xgmi_shared_memory *xgmi_cmd;
712 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
713 	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
714 	int i;
715 	int ret;
716 
717 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
718 		return -EINVAL;
719 
	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
721 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
722 
723 	/* Fill in the shared memory with topology information as input */
724 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
725 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
726 	topology_info_input->num_nodes = number_devices;
727 
728 	for (i = 0; i < topology_info_input->num_nodes; i++) {
729 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
730 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
731 		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
732 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
733 	}
734 
735 	/* Invoke xgmi ta to get the topology information */
736 	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
737 	if (ret)
738 		return ret;
739 
740 	/* Read the output topology information from the shared memory */
741 	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
742 	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
743 	for (i = 0; i < topology->num_nodes; i++) {
744 		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
745 		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
746 		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
747 		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
748 	}
749 
750 	return 0;
751 }
752 
753 int psp_xgmi_set_topology_info(struct psp_context *psp,
754 			       int number_devices,
755 			       struct psp_xgmi_topology_info *topology)
756 {
757 	struct ta_xgmi_shared_memory *xgmi_cmd;
758 	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
759 	int i;
760 
761 	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
762 		return -EINVAL;
763 
	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
765 	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
766 
767 	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
768 	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
769 	topology_info_input->num_nodes = number_devices;
770 
771 	for (i = 0; i < topology_info_input->num_nodes; i++) {
772 		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
773 		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
774 		topology_info_input->nodes[i].is_sharing_enabled = 1;
775 		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
776 	}
777 
778 	/* Invoke xgmi ta to set topology information */
779 	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
780 }
781 
782 // ras begin
783 static int psp_ras_init_shared_buf(struct psp_context *psp)
784 {
785 	int ret;
786 
787 	/*
788 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
789 	 * physical) for ras ta <-> Driver
790 	 */
791 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
792 			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
793 			&psp->ras.ras_shared_bo,
794 			&psp->ras.ras_shared_mc_addr,
795 			&psp->ras.ras_shared_buf);
796 
797 	return ret;
798 }
799 
800 static int psp_ras_load(struct psp_context *psp)
801 {
802 	int ret;
803 	struct psp_gfx_cmd_resp *cmd;
804 
805 	/*
806 	 * TODO: bypass the loading in sriov for now
807 	 */
808 	if (amdgpu_sriov_vf(psp->adev))
809 		return 0;
810 
811 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
812 	if (!cmd)
813 		return -ENOMEM;
814 
815 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
816 	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
817 
818 	psp_prep_ta_load_cmd_buf(cmd,
819 				 psp->fw_pri_mc_addr,
820 				 psp->ta_ras_ucode_size,
821 				 psp->ras.ras_shared_mc_addr,
822 				 PSP_RAS_SHARED_MEM_SIZE);
823 
824 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
825 			psp->fence_buf_mc_addr);
826 
827 	if (!ret) {
828 		psp->ras.ras_initialized = true;
829 		psp->ras.session_id = cmd->resp.session_id;
830 	}
831 
832 	kfree(cmd);
833 
834 	return ret;
835 }
836 
837 static int psp_ras_unload(struct psp_context *psp)
838 {
839 	int ret;
840 	struct psp_gfx_cmd_resp *cmd;
841 
842 	/*
843 	 * TODO: bypass the unloading in sriov for now
844 	 */
845 	if (amdgpu_sriov_vf(psp->adev))
846 		return 0;
847 
848 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
849 	if (!cmd)
850 		return -ENOMEM;
851 
852 	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
853 
854 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
855 			psp->fence_buf_mc_addr);
856 
857 	kfree(cmd);
858 
859 	return ret;
860 }
861 
862 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
863 {
864 	struct ta_ras_shared_memory *ras_cmd;
865 	int ret;
866 
867 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
868 
869 	/*
870 	 * TODO: bypass the loading in sriov for now
871 	 */
872 	if (amdgpu_sriov_vf(psp->adev))
873 		return 0;
874 
875 	ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
876 
877 	if (amdgpu_ras_intr_triggered())
878 		return ret;
879 
	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		DRM_WARN("RAS: Unsupported Interface");
		return -EINVAL;
	}
885 
886 	if (!ret) {
887 		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
888 			dev_warn(psp->adev->dev, "ECC switch disabled\n");
889 
890 			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
893 			dev_warn(psp->adev->dev,
894 				 "RAS internal register access blocked\n");
895 	}
896 
897 	return ret;
898 }
899 
900 int psp_ras_enable_features(struct psp_context *psp,
901 		union ta_ras_cmd_input *info, bool enable)
902 {
903 	struct ta_ras_shared_memory *ras_cmd;
904 	int ret;
905 
906 	if (!psp->ras.ras_initialized)
907 		return -EINVAL;
908 
909 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
910 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
911 
912 	if (enable)
913 		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
914 	else
915 		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
916 
917 	ras_cmd->ras_in_message = *info;
918 
919 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
920 	if (ret)
921 		return -EINVAL;
922 
923 	return ras_cmd->ras_status;
924 }
925 
926 static int psp_ras_terminate(struct psp_context *psp)
927 {
928 	int ret;
929 
930 	/*
931 	 * TODO: bypass the terminate in sriov for now
932 	 */
933 	if (amdgpu_sriov_vf(psp->adev))
934 		return 0;
935 
936 	if (!psp->ras.ras_initialized)
937 		return 0;
938 
939 	ret = psp_ras_unload(psp);
940 	if (ret)
941 		return ret;
942 
943 	psp->ras.ras_initialized = false;
944 
945 	/* free ras shared memory */
946 	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
947 			&psp->ras.ras_shared_mc_addr,
948 			&psp->ras.ras_shared_buf);
949 
950 	return 0;
951 }
952 
953 static int psp_ras_initialize(struct psp_context *psp)
954 {
955 	int ret;
956 
957 	/*
958 	 * TODO: bypass the initialize in sriov for now
959 	 */
960 	if (amdgpu_sriov_vf(psp->adev))
961 		return 0;
962 
963 	if (!psp->adev->psp.ta_ras_ucode_size ||
964 	    !psp->adev->psp.ta_ras_start_addr) {
965 		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
966 		return 0;
967 	}
968 
969 	if (!psp->ras.ras_initialized) {
970 		ret = psp_ras_init_shared_buf(psp);
971 		if (ret)
972 			return ret;
973 	}
974 
975 	ret = psp_ras_load(psp);
976 	if (ret)
977 		return ret;
978 
979 	return 0;
980 }
981 
982 int psp_ras_trigger_error(struct psp_context *psp,
983 			  struct ta_ras_trigger_error_input *info)
984 {
985 	struct ta_ras_shared_memory *ras_cmd;
986 	int ret;
987 
988 	if (!psp->ras.ras_initialized)
989 		return -EINVAL;
990 
991 	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
992 	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
993 
994 	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
995 	ras_cmd->ras_in_message.trigger_error = *info;
996 
997 	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
998 	if (ret)
999 		return -EINVAL;
1000 
	/* If err_event_athub occurs, the error injection was successful, but
	 * the return status from the TA is no longer reliable
	 */
1003 	if (amdgpu_ras_intr_triggered())
1004 		return 0;
1005 
1006 	return ras_cmd->ras_status;
1007 }
1008 // ras end
1009 
1010 // HDCP start
1011 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
1012 {
1013 	int ret;
1014 
1015 	/*
1016 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1017 	 * physical) for hdcp ta <-> Driver
1018 	 */
1019 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
1020 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1021 				      &psp->hdcp_context.hdcp_shared_bo,
1022 				      &psp->hdcp_context.hdcp_shared_mc_addr,
1023 				      &psp->hdcp_context.hdcp_shared_buf);
1024 
1025 	return ret;
1026 }
1027 
1028 static int psp_hdcp_load(struct psp_context *psp)
1029 {
1030 	int ret;
1031 	struct psp_gfx_cmd_resp *cmd;
1032 
1033 	/*
1034 	 * TODO: bypass the loading in sriov for now
1035 	 */
1036 	if (amdgpu_sriov_vf(psp->adev))
1037 		return 0;
1038 
1039 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1040 	if (!cmd)
1041 		return -ENOMEM;
1042 
1043 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1044 	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
1045 	       psp->ta_hdcp_ucode_size);
1046 
1047 	psp_prep_ta_load_cmd_buf(cmd,
1048 				 psp->fw_pri_mc_addr,
1049 				 psp->ta_hdcp_ucode_size,
1050 				 psp->hdcp_context.hdcp_shared_mc_addr,
1051 				 PSP_HDCP_SHARED_MEM_SIZE);
1052 
1053 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1054 
1055 	if (!ret) {
1056 		psp->hdcp_context.hdcp_initialized = true;
1057 		psp->hdcp_context.session_id = cmd->resp.session_id;
1058 		mutex_init(&psp->hdcp_context.mutex);
1059 	}
1060 
1061 	kfree(cmd);
1062 
1063 	return ret;
1064 }

static int psp_hdcp_initialize(struct psp_context *psp)
1066 {
1067 	int ret;
1068 
1069 	/*
1070 	 * TODO: bypass the initialize in sriov for now
1071 	 */
1072 	if (amdgpu_sriov_vf(psp->adev))
1073 		return 0;
1074 
1075 	if (!psp->adev->psp.ta_hdcp_ucode_size ||
1076 	    !psp->adev->psp.ta_hdcp_start_addr) {
1077 		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1078 		return 0;
1079 	}
1080 
1081 	if (!psp->hdcp_context.hdcp_initialized) {
1082 		ret = psp_hdcp_init_shared_buf(psp);
1083 		if (ret)
1084 			return ret;
1085 	}
1086 
1087 	ret = psp_hdcp_load(psp);
1088 	if (ret)
1089 		return ret;
1090 
1091 	return 0;
1092 }
1093 
1094 static int psp_hdcp_unload(struct psp_context *psp)
1095 {
1096 	int ret;
1097 	struct psp_gfx_cmd_resp *cmd;
1098 
1099 	/*
1100 	 * TODO: bypass the unloading in sriov for now
1101 	 */
1102 	if (amdgpu_sriov_vf(psp->adev))
1103 		return 0;
1104 
1105 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1106 	if (!cmd)
1107 		return -ENOMEM;
1108 
1109 	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
1110 
1111 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1112 
1113 	kfree(cmd);
1114 
1115 	return ret;
1116 }
1117 
1118 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1119 {
1120 	/*
1121 	 * TODO: bypass the loading in sriov for now
1122 	 */
1123 	if (amdgpu_sriov_vf(psp->adev))
1124 		return 0;
1125 
1126 	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
1127 }
1128 
1129 static int psp_hdcp_terminate(struct psp_context *psp)
1130 {
1131 	int ret;
1132 
1133 	/*
1134 	 * TODO: bypass the terminate in sriov for now
1135 	 */
1136 	if (amdgpu_sriov_vf(psp->adev))
1137 		return 0;
1138 
1139 	if (!psp->hdcp_context.hdcp_initialized)
1140 		return 0;
1141 
1142 	ret = psp_hdcp_unload(psp);
1143 	if (ret)
1144 		return ret;
1145 
1146 	psp->hdcp_context.hdcp_initialized = false;
1147 
1148 	/* free hdcp shared memory */
1149 	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
1150 			      &psp->hdcp_context.hdcp_shared_mc_addr,
1151 			      &psp->hdcp_context.hdcp_shared_buf);
1152 
1153 	return 0;
1154 }
1155 // HDCP end
1156 
1157 // DTM start
1158 static int psp_dtm_init_shared_buf(struct psp_context *psp)
1159 {
1160 	int ret;
1161 
1162 	/*
1163 	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1164 	 * physical) for dtm ta <-> Driver
1165 	 */
1166 	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
1167 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1168 				      &psp->dtm_context.dtm_shared_bo,
1169 				      &psp->dtm_context.dtm_shared_mc_addr,
1170 				      &psp->dtm_context.dtm_shared_buf);
1171 
1172 	return ret;
1173 }
1174 
1175 static int psp_dtm_load(struct psp_context *psp)
1176 {
1177 	int ret;
1178 	struct psp_gfx_cmd_resp *cmd;
1179 
1180 	/*
1181 	 * TODO: bypass the loading in sriov for now
1182 	 */
1183 	if (amdgpu_sriov_vf(psp->adev))
1184 		return 0;
1185 
1186 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1187 	if (!cmd)
1188 		return -ENOMEM;
1189 
1190 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1191 	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
1192 
1193 	psp_prep_ta_load_cmd_buf(cmd,
1194 				 psp->fw_pri_mc_addr,
1195 				 psp->ta_dtm_ucode_size,
1196 				 psp->dtm_context.dtm_shared_mc_addr,
1197 				 PSP_DTM_SHARED_MEM_SIZE);
1198 
1199 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1200 
1201 	if (!ret) {
1202 		psp->dtm_context.dtm_initialized = true;
1203 		psp->dtm_context.session_id = cmd->resp.session_id;
1204 		mutex_init(&psp->dtm_context.mutex);
1205 	}
1206 
1207 	kfree(cmd);
1208 
1209 	return ret;
1210 }
1211 
1212 static int psp_dtm_initialize(struct psp_context *psp)
1213 {
1214 	int ret;
1215 
1216 	/*
1217 	 * TODO: bypass the initialize in sriov for now
1218 	 */
1219 	if (amdgpu_sriov_vf(psp->adev))
1220 		return 0;
1221 
1222 	if (!psp->adev->psp.ta_dtm_ucode_size ||
1223 	    !psp->adev->psp.ta_dtm_start_addr) {
1224 		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1225 		return 0;
1226 	}
1227 
1228 	if (!psp->dtm_context.dtm_initialized) {
1229 		ret = psp_dtm_init_shared_buf(psp);
1230 		if (ret)
1231 			return ret;
1232 	}
1233 
1234 	ret = psp_dtm_load(psp);
1235 	if (ret)
1236 		return ret;
1237 
1238 	return 0;
1239 }
1240 
1241 static int psp_dtm_unload(struct psp_context *psp)
1242 {
1243 	int ret;
1244 	struct psp_gfx_cmd_resp *cmd;
1245 
1246 	/*
1247 	 * TODO: bypass the unloading in sriov for now
1248 	 */
1249 	if (amdgpu_sriov_vf(psp->adev))
1250 		return 0;
1251 
1252 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1253 	if (!cmd)
1254 		return -ENOMEM;
1255 
1256 	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
1257 
1258 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1259 
1260 	kfree(cmd);
1261 
1262 	return ret;
1263 }
1264 
1265 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1266 {
1267 	/*
1268 	 * TODO: bypass the loading in sriov for now
1269 	 */
1270 	if (amdgpu_sriov_vf(psp->adev))
1271 		return 0;
1272 
1273 	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
1274 }
1275 
1276 static int psp_dtm_terminate(struct psp_context *psp)
1277 {
1278 	int ret;
1279 
1280 	/*
1281 	 * TODO: bypass the terminate in sriov for now
1282 	 */
1283 	if (amdgpu_sriov_vf(psp->adev))
1284 		return 0;
1285 
1286 	if (!psp->dtm_context.dtm_initialized)
1287 		return 0;
1288 
1289 	ret = psp_dtm_unload(psp);
1290 	if (ret)
1291 		return ret;
1292 
1293 	psp->dtm_context.dtm_initialized = false;
1294 
	/* free dtm shared memory */
1296 	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
1297 			      &psp->dtm_context.dtm_shared_mc_addr,
1298 			      &psp->dtm_context.dtm_shared_buf);
1299 
1300 	return 0;
1301 }
1302 // DTM end
1303 
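/*
 * Boot the PSP: on bare metal, load KDB (when available), SYS_DRV and
 * the secure OS through the bootloader; then create the KM ring, set up
 * the TMR and, for ASICs with PMFW-centralized DF Cstate management,
 * load the SMU firmware before the TMR load command is issued.
 */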
1304 static int psp_hw_start(struct psp_context *psp)
1305 {
1306 	struct amdgpu_device *adev = psp->adev;
1307 	int ret;
1308 
1309 	if (!amdgpu_sriov_vf(adev)) {
1310 		if (psp->kdb_bin_size &&
1311 		    (psp->funcs->bootloader_load_kdb != NULL)) {
1312 			ret = psp_bootloader_load_kdb(psp);
1313 			if (ret) {
1314 				DRM_ERROR("PSP load kdb failed!\n");
1315 				return ret;
1316 			}
1317 		}
1318 
1319 		ret = psp_bootloader_load_sysdrv(psp);
1320 		if (ret) {
1321 			DRM_ERROR("PSP load sysdrv failed!\n");
1322 			return ret;
1323 		}
1324 
1325 		ret = psp_bootloader_load_sos(psp);
1326 		if (ret) {
1327 			DRM_ERROR("PSP load sos failed!\n");
1328 			return ret;
1329 		}
1330 	}
1331 
1332 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
1333 	if (ret) {
1334 		DRM_ERROR("PSP create ring failed!\n");
1335 		return ret;
1336 	}
1337 
1338 	ret = psp_tmr_init(psp);
1339 	if (ret) {
1340 		DRM_ERROR("PSP tmr init failed!\n");
1341 		return ret;
1342 	}
1343 
	/*
	 * For ASICs with DF Cstate management centralized in the
	 * PMFW, TMR setup needs to be performed after the PMFW has
	 * been loaded and before any other non-psp firmware is loaded.
	 */
1349 	if (psp->pmfw_centralized_cstate_management) {
1350 		ret = psp_load_smu_fw(psp);
1351 		if (ret)
1352 			return ret;
1353 	}
1354 
1355 	ret = psp_tmr_load(psp);
1356 	if (ret) {
1357 		DRM_ERROR("PSP load tmr failed!\n");
1358 		return ret;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
1365 			   enum psp_gfx_fw_type *type)
1366 {
1367 	switch (ucode->ucode_id) {
1368 	case AMDGPU_UCODE_ID_SDMA0:
1369 		*type = GFX_FW_TYPE_SDMA0;
1370 		break;
1371 	case AMDGPU_UCODE_ID_SDMA1:
1372 		*type = GFX_FW_TYPE_SDMA1;
1373 		break;
1374 	case AMDGPU_UCODE_ID_SDMA2:
1375 		*type = GFX_FW_TYPE_SDMA2;
1376 		break;
1377 	case AMDGPU_UCODE_ID_SDMA3:
1378 		*type = GFX_FW_TYPE_SDMA3;
1379 		break;
1380 	case AMDGPU_UCODE_ID_SDMA4:
1381 		*type = GFX_FW_TYPE_SDMA4;
1382 		break;
1383 	case AMDGPU_UCODE_ID_SDMA5:
1384 		*type = GFX_FW_TYPE_SDMA5;
1385 		break;
1386 	case AMDGPU_UCODE_ID_SDMA6:
1387 		*type = GFX_FW_TYPE_SDMA6;
1388 		break;
1389 	case AMDGPU_UCODE_ID_SDMA7:
1390 		*type = GFX_FW_TYPE_SDMA7;
1391 		break;
1392 	case AMDGPU_UCODE_ID_CP_CE:
1393 		*type = GFX_FW_TYPE_CP_CE;
1394 		break;
1395 	case AMDGPU_UCODE_ID_CP_PFP:
1396 		*type = GFX_FW_TYPE_CP_PFP;
1397 		break;
1398 	case AMDGPU_UCODE_ID_CP_ME:
1399 		*type = GFX_FW_TYPE_CP_ME;
1400 		break;
1401 	case AMDGPU_UCODE_ID_CP_MEC1:
1402 		*type = GFX_FW_TYPE_CP_MEC;
1403 		break;
1404 	case AMDGPU_UCODE_ID_CP_MEC1_JT:
1405 		*type = GFX_FW_TYPE_CP_MEC_ME1;
1406 		break;
1407 	case AMDGPU_UCODE_ID_CP_MEC2:
1408 		*type = GFX_FW_TYPE_CP_MEC;
1409 		break;
1410 	case AMDGPU_UCODE_ID_CP_MEC2_JT:
1411 		*type = GFX_FW_TYPE_CP_MEC_ME2;
1412 		break;
1413 	case AMDGPU_UCODE_ID_RLC_G:
1414 		*type = GFX_FW_TYPE_RLC_G;
1415 		break;
1416 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
1417 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
1418 		break;
1419 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
1420 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
1421 		break;
1422 	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
1423 		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
1424 		break;
1425 	case AMDGPU_UCODE_ID_SMC:
1426 		*type = GFX_FW_TYPE_SMU;
1427 		break;
1428 	case AMDGPU_UCODE_ID_UVD:
1429 		*type = GFX_FW_TYPE_UVD;
1430 		break;
1431 	case AMDGPU_UCODE_ID_UVD1:
1432 		*type = GFX_FW_TYPE_UVD1;
1433 		break;
1434 	case AMDGPU_UCODE_ID_VCE:
1435 		*type = GFX_FW_TYPE_VCE;
1436 		break;
1437 	case AMDGPU_UCODE_ID_VCN:
1438 		*type = GFX_FW_TYPE_VCN;
1439 		break;
1440 	case AMDGPU_UCODE_ID_VCN1:
1441 		*type = GFX_FW_TYPE_VCN1;
1442 		break;
1443 	case AMDGPU_UCODE_ID_DMCU_ERAM:
1444 		*type = GFX_FW_TYPE_DMCU_ERAM;
1445 		break;
1446 	case AMDGPU_UCODE_ID_DMCU_INTV:
1447 		*type = GFX_FW_TYPE_DMCU_ISR;
1448 		break;
1449 	case AMDGPU_UCODE_ID_VCN0_RAM:
1450 		*type = GFX_FW_TYPE_VCN0_RAM;
1451 		break;
1452 	case AMDGPU_UCODE_ID_VCN1_RAM:
1453 		*type = GFX_FW_TYPE_VCN1_RAM;
1454 		break;
1455 	case AMDGPU_UCODE_ID_DMCUB:
1456 		*type = GFX_FW_TYPE_DMUB;
1457 		break;
1458 	case AMDGPU_UCODE_ID_MAXIMUM:
1459 	default:
1460 		return -EINVAL;
1461 	}
1462 
1463 	return 0;
1464 }
1465 
1466 static void psp_print_fw_hdr(struct psp_context *psp,
1467 			     struct amdgpu_firmware_info *ucode)
1468 {
1469 	struct amdgpu_device *adev = psp->adev;
1470 	struct common_firmware_header *hdr;
1471 
1472 	switch (ucode->ucode_id) {
1473 	case AMDGPU_UCODE_ID_SDMA0:
1474 	case AMDGPU_UCODE_ID_SDMA1:
1475 	case AMDGPU_UCODE_ID_SDMA2:
1476 	case AMDGPU_UCODE_ID_SDMA3:
1477 	case AMDGPU_UCODE_ID_SDMA4:
1478 	case AMDGPU_UCODE_ID_SDMA5:
1479 	case AMDGPU_UCODE_ID_SDMA6:
1480 	case AMDGPU_UCODE_ID_SDMA7:
1481 		hdr = (struct common_firmware_header *)
1482 			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
1483 		amdgpu_ucode_print_sdma_hdr(hdr);
1484 		break;
1485 	case AMDGPU_UCODE_ID_CP_CE:
1486 		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
1487 		amdgpu_ucode_print_gfx_hdr(hdr);
1488 		break;
1489 	case AMDGPU_UCODE_ID_CP_PFP:
1490 		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
1491 		amdgpu_ucode_print_gfx_hdr(hdr);
1492 		break;
1493 	case AMDGPU_UCODE_ID_CP_ME:
1494 		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
1495 		amdgpu_ucode_print_gfx_hdr(hdr);
1496 		break;
1497 	case AMDGPU_UCODE_ID_CP_MEC1:
1498 		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
1499 		amdgpu_ucode_print_gfx_hdr(hdr);
1500 		break;
1501 	case AMDGPU_UCODE_ID_RLC_G:
1502 		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
1503 		amdgpu_ucode_print_rlc_hdr(hdr);
1504 		break;
1505 	case AMDGPU_UCODE_ID_SMC:
1506 		hdr = (struct common_firmware_header *)adev->pm.fw->data;
1507 		amdgpu_ucode_print_smc_hdr(hdr);
1508 		break;
1509 	default:
1510 		break;
1511 	}
1512 }
1513 
1514 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
1515 				       struct psp_gfx_cmd_resp *cmd)
1516 {
1517 	int ret;
1518 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
1519 
1520 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
1521 
1522 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1523 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
1524 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
1525 	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
1526 
1527 	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
1528 	if (ret)
1529 		DRM_ERROR("Unknown firmware type\n");
1530 
1531 	return ret;
1532 }
1533 
1534 static int psp_execute_np_fw_load(struct psp_context *psp,
1535 			          struct amdgpu_firmware_info *ucode)
1536 {
1537 	int ret = 0;
1538 
1539 	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
1540 	if (ret)
1541 		return ret;
1542 
1543 	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
1544 				 psp->fence_buf_mc_addr);
1545 
1546 	return ret;
1547 }
1548 
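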
1549 static int psp_load_smu_fw(struct psp_context *psp)
1550 {
1551 	int ret;
	struct amdgpu_device *adev = psp->adev;
1553 	struct amdgpu_firmware_info *ucode =
1554 			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
1555 	struct amdgpu_ras *ras = psp->ras.ras;
1556 
1557 	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
1558 		return 0;

	if (adev->in_gpu_reset && ras && ras->supported) {
		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
		if (ret)
			DRM_WARN("Failed to set MP1 state to prepare for reload\n");
	}
1567 
1568 	ret = psp_execute_np_fw_load(psp, ucode);
1569 
1570 	if (ret)
1571 		DRM_ERROR("PSP load smu failed!\n");
1572 
1573 	return ret;
1574 }
1575 
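/*
 * Return true when the given ucode should not be submitted to the PSP:
 * no firmware image, SMC firmware that is loaded elsewhere (reload
 * quirk, autoload or PMFW-centralized cstate management), ucodes owned
 * by the host under SR-IOV, or MEC JT images when autoload is enabled.
 */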
1576 static bool fw_load_skip_check(struct psp_context *psp,
1577 			       struct amdgpu_firmware_info *ucode)
1578 {
1579 	if (!ucode->fw)
1580 		return true;
1581 
1582 	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1583 	    (psp_smu_reload_quirk(psp) ||
1584 	     psp->autoload_supported ||
1585 	     psp->pmfw_centralized_cstate_management))
1586 		return true;
1587 
1588 	if (amdgpu_sriov_vf(psp->adev) &&
1589 	   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
1590 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
1591 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
1592 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
1593 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
1594 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
1595 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
1596 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
1597 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
1598 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
1599 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
1600 	    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
1601 	    || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
		/* skip ucode loading in SRIOV VF */
1603 		return true;
1604 
1605 	if (psp->autoload_supported &&
1606 	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
1607 	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
1608 		/* skip mec JT when autoload is enabled */
1609 		return true;
1610 
1611 	return false;
1612 }
1613 
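/*
 * Load all non-PSP firmwares through the PSP.  The SMU firmware is
 * loaded first when autoload is supported and DF Cstate management is
 * not centralized in the PMFW; RLC autoload is started once the PSP
 * has received all of the GFX firmware.
 */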
1614 static int psp_np_fw_load(struct psp_context *psp)
1615 {
1616 	int i, ret;
1617 	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;
1619 
1620 	if (psp->autoload_supported &&
1621 	    !psp->pmfw_centralized_cstate_management) {
1622 		ret = psp_load_smu_fw(psp);
1623 		if (ret)
1624 			return ret;
1625 	}
1626 
1627 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
1628 		ucode = &adev->firmware.ucode[i];
1629 
1630 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
1631 		    !fw_load_skip_check(psp, ucode)) {
1632 			ret = psp_load_smu_fw(psp);
1633 			if (ret)
1634 				return ret;
1635 			continue;
1636 		}
1637 
1638 		if (fw_load_skip_check(psp, ucode))
1639 			continue;
1640 
1641 		psp_print_fw_hdr(psp, ucode);
1642 
1643 		ret = psp_execute_np_fw_load(psp, ucode);
1644 		if (ret)
1645 			return ret;
1646 
		/* Start RLC autoload after the PSP has received all the GFX firmware */
1648 		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
1649 		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
1650 			ret = psp_rlc_autoload_start(psp);
1651 			if (ret) {
1652 				DRM_ERROR("Failed to start rlc autoload\n");
1653 				return ret;
1654 			}
1655 		}
1656 	}
1657 
1658 	return 0;
1659 }
1660 
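/*
 * Allocate the PSP firmware private, fence and command buffers, bring
 * up the KM ring and the PSP itself, then load the non-PSP firmwares,
 * the ASD and the optional RAS/HDCP/DTM TAs.  On SR-IOV GPU reset the
 * buffer allocation is skipped and the existing ring is reused.
 */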
1661 static int psp_load_fw(struct amdgpu_device *adev)
1662 {
1663 	int ret;
1664 	struct psp_context *psp = &adev->psp;
1665 
1666 	if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
1667 		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
1668 		goto skip_memalloc;
1669 	}
1670 
1671 	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1672 	if (!psp->cmd)
1673 		return -ENOMEM;
1674 
1675 	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
1676 					AMDGPU_GEM_DOMAIN_GTT,
1677 					&psp->fw_pri_bo,
1678 					&psp->fw_pri_mc_addr,
1679 					&psp->fw_pri_buf);
1680 	if (ret)
1681 		goto failed;
1682 
1683 	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
1684 					AMDGPU_GEM_DOMAIN_VRAM,
1685 					&psp->fence_buf_bo,
1686 					&psp->fence_buf_mc_addr,
1687 					&psp->fence_buf);
1688 	if (ret)
1689 		goto failed;
1690 
1691 	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
1692 				      AMDGPU_GEM_DOMAIN_VRAM,
1693 				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1694 				      (void **)&psp->cmd_buf_mem);
1695 	if (ret)
1696 		goto failed;
1697 
1698 	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
1699 
1700 	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
1701 	if (ret) {
1702 		DRM_ERROR("PSP ring init failed!\n");
1703 		goto failed;
1704 	}
1705 
1706 skip_memalloc:
1707 	ret = psp_hw_start(psp);
1708 	if (ret)
1709 		goto failed;
1710 
1711 	ret = psp_np_fw_load(psp);
1712 	if (ret)
1713 		goto failed;
1714 
1715 	ret = psp_asd_load(psp);
1716 	if (ret) {
1717 		DRM_ERROR("PSP load asd failed!\n");
1718 		return ret;
1719 	}
1720 
1721 	if (psp->adev->psp.ta_fw) {
1722 		ret = psp_ras_initialize(psp);
1723 		if (ret)
1724 			dev_err(psp->adev->dev,
1725 					"RAS: Failed to initialize RAS\n");
1726 
1727 		ret = psp_hdcp_initialize(psp);
1728 		if (ret)
1729 			dev_err(psp->adev->dev,
1730 				"HDCP: Failed to initialize HDCP\n");
1731 
1732 		ret = psp_dtm_initialize(psp);
1733 		if (ret)
1734 			dev_err(psp->adev->dev,
1735 				"DTM: Failed to initialize DTM\n");
1736 	}
1737 
1738 	return 0;
1739 
1740 failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffer destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini.
	 */
1746 	return ret;
1747 }
1748 
1749 static int psp_hw_init(void *handle)
1750 {
1751 	int ret;
1752 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1753 
1754 	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only needed once, during hw_init; it is not
	 * needed on resume.
	 */
1759 	ret = amdgpu_ucode_init_bo(adev);
1760 	if (ret)
1761 		goto failed;
1762 
1763 	ret = psp_load_fw(adev);
1764 	if (ret) {
1765 		DRM_ERROR("PSP firmware loading failed\n");
1766 		goto failed;
1767 	}
1768 
1769 	mutex_unlock(&adev->firmware.mutex);
1770 	return 0;
1771 
1772 failed:
1773 	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
1774 	mutex_unlock(&adev->firmware.mutex);
1775 	return -EINVAL;
1776 }
1777 
1778 static int psp_hw_fini(void *handle)
1779 {
1780 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1781 	struct psp_context *psp = &adev->psp;
1782 	void *tmr_buf;
1783 	void **pptr;
1784 
1785 	if (psp->adev->psp.ta_fw) {
1786 		psp_ras_terminate(psp);
1787 		psp_dtm_terminate(psp);
1788 		psp_hdcp_terminate(psp);
1789 	}
1790 
1791 	psp_asd_unload(psp);
1792 
1793 	psp_ring_destroy(psp, PSP_RING_TYPE__KM);
1794 
1795 	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
1796 	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
1797 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
1798 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
1799 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
1800 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
1801 	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
1802 			      (void **)&psp->cmd_buf_mem);
1803 
1804 	kfree(psp->cmd);
1805 	psp->cmd = NULL;
1806 
1807 	return 0;
1808 }
1809 
1810 static int psp_suspend(void *handle)
1811 {
1812 	int ret;
1813 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1814 	struct psp_context *psp = &adev->psp;
1815 
1816 	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1817 	    psp->xgmi_context.initialized == 1) {
1818 		ret = psp_xgmi_terminate(psp);
1819 		if (ret) {
1820 			DRM_ERROR("Failed to terminate xgmi ta\n");
1821 			return ret;
1822 		}
1823 	}
1824 
1825 	if (psp->adev->psp.ta_fw) {
1826 		ret = psp_ras_terminate(psp);
1827 		if (ret) {
1828 			DRM_ERROR("Failed to terminate ras ta\n");
1829 			return ret;
1830 		}
1831 		ret = psp_hdcp_terminate(psp);
1832 		if (ret) {
1833 			DRM_ERROR("Failed to terminate hdcp ta\n");
1834 			return ret;
1835 		}
1836 		ret = psp_dtm_terminate(psp);
1837 		if (ret) {
1838 			DRM_ERROR("Failed to terminate dtm ta\n");
1839 			return ret;
1840 		}
1841 	}
1842 
1843 	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
1844 	if (ret) {
1845 		DRM_ERROR("PSP ring stop failed\n");
1846 		return ret;
1847 	}
1848 
1849 	return 0;
1850 }
1851 
1852 static int psp_resume(void *handle)
1853 {
1854 	int ret;
1855 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1856 	struct psp_context *psp = &adev->psp;
1857 
1858 	DRM_INFO("PSP is resuming...\n");
1859 
1860 	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
1861 	if (ret) {
1862 		DRM_ERROR("Failed to process memory training!\n");
1863 		return ret;
1864 	}
1865 
1866 	mutex_lock(&adev->firmware.mutex);
1867 
1868 	ret = psp_hw_start(psp);
1869 	if (ret)
1870 		goto failed;
1871 
1872 	ret = psp_np_fw_load(psp);
1873 	if (ret)
1874 		goto failed;
1875 
1876 	ret = psp_asd_load(psp);
1877 	if (ret) {
1878 		DRM_ERROR("PSP load asd failed!\n");
1879 		goto failed;
1880 	}
1881 
1882 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
1883 		ret = psp_xgmi_initialize(psp);
		/* Warn about an XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
1887 		if (ret)
1888 			dev_err(psp->adev->dev,
1889 				"XGMI: Failed to initialize XGMI session\n");
1890 	}
1891 
1892 	if (psp->adev->psp.ta_fw) {
1893 		ret = psp_ras_initialize(psp);
1894 		if (ret)
1895 			dev_err(psp->adev->dev,
1896 					"RAS: Failed to initialize RAS\n");
1897 
1898 		ret = psp_hdcp_initialize(psp);
1899 		if (ret)
1900 			dev_err(psp->adev->dev,
1901 				"HDCP: Failed to initialize HDCP\n");
1902 
1903 		ret = psp_dtm_initialize(psp);
1904 		if (ret)
1905 			dev_err(psp->adev->dev,
1906 				"DTM: Failed to initialize DTM\n");
1907 	}
1908 
1909 	mutex_unlock(&adev->firmware.mutex);
1910 
1911 	return 0;
1912 
1913 failed:
1914 	DRM_ERROR("PSP resume failed\n");
1915 	mutex_unlock(&adev->firmware.mutex);
1916 	return ret;
1917 }
1918 
1919 int psp_gpu_reset(struct amdgpu_device *adev)
1920 {
1921 	int ret;
1922 
1923 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1924 		return 0;
1925 
1926 	mutex_lock(&adev->psp.mutex);
1927 	ret = psp_mode1_reset(&adev->psp);
1928 	mutex_unlock(&adev->psp.mutex);
1929 
1930 	return ret;
1931 }
1932 
1933 int psp_rlc_autoload_start(struct psp_context *psp)
1934 {
1935 	int ret;
1936 	struct psp_gfx_cmd_resp *cmd;
1937 
1938 	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
1939 	if (!cmd)
1940 		return -ENOMEM;
1941 
1942 	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
1943 
1944 	ret = psp_cmd_submit_buf(psp, NULL, cmd,
1945 				 psp->fence_buf_mc_addr);
1946 	kfree(cmd);
1947 	return ret;
1948 }
1949 
1950 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
1951 			uint64_t cmd_gpu_addr, int cmd_size)
1952 {
1953 	struct amdgpu_firmware_info ucode = {0};
1954 
1955 	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
1956 		AMDGPU_UCODE_ID_VCN0_RAM;
1957 	ucode.mc_addr = cmd_gpu_addr;
1958 	ucode.ucode_size = cmd_size;
1959 
1960 	return psp_execute_np_fw_load(&adev->psp, &ucode);
1961 }
1962 
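/*
 * Write one ring buffer frame at the current write pointer: point it at
 * the command buffer and fence address, stamp it with the fence value,
 * flush HDP and advance the write pointer (in dwords, wrapping at the
 * ring size).
 */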
1963 int psp_ring_cmd_submit(struct psp_context *psp,
1964 			uint64_t cmd_buf_mc_addr,
1965 			uint64_t fence_mc_addr,
1966 			int index)
1967 {
1968 	unsigned int psp_write_ptr_reg = 0;
1969 	struct psp_gfx_rb_frame *write_frame;
1970 	struct psp_ring *ring = &psp->km_ring;
1971 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
1972 	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
1973 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
1974 	struct amdgpu_device *adev = psp->adev;
1975 	uint32_t ring_size_dw = ring->ring_size / 4;
1976 	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
1977 
1978 	/* KM (GPCOM) prepare write pointer */
1979 	psp_write_ptr_reg = psp_ring_get_wptr(psp);
1980 
1981 	/* Update KM RB frame pointer to new frame */
1982 	/* write_frame ptr increments by size of rb_frame in bytes */
1983 	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
1984 	if ((psp_write_ptr_reg % ring_size_dw) == 0)
1985 		write_frame = ring_buffer_start;
1986 	else
1987 		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
1988 	/* Check invalid write_frame ptr address */
1989 	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
1990 		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
1991 			  ring_buffer_start, ring_buffer_end, write_frame);
1992 		DRM_ERROR("write_frame is pointing to address out of bounds\n");
1993 		return -EINVAL;
1994 	}
1995 
1996 	/* Initialize KM RB frame */
1997 	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
1998 
1999 	/* Update KM RB frame */
2000 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
2001 	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
2002 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
2003 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
2004 	write_frame->fence_value = index;
2005 	amdgpu_asic_flush_hdp(adev, NULL);
2006 
2007 	/* Update the write Pointer in DWORDs */
2008 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
2009 	psp_ring_set_wptr(psp, psp_write_ptr_reg);
2010 	return 0;
2011 }
2012 
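/*
 * Request and validate the ASD firmware image ("amdgpu/<chip>_asd.bin"),
 * then cache its version, feature version, size and start address in the
 * psp context.  On failure the firmware reference is released.
 */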
2013 int psp_init_asd_microcode(struct psp_context *psp,
2014 			   const char *chip_name)
2015 {
2016 	struct amdgpu_device *adev = psp->adev;
2017 	char fw_name[30];
2018 	const struct psp_firmware_header_v1_0 *asd_hdr;
2019 	int err = 0;
2020 
2021 	if (!chip_name) {
2022 		dev_err(adev->dev, "invalid chip name for asd microcode\n");
2023 		return -EINVAL;
2024 	}
2025 
2026 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
2027 	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
2028 	if (err)
2029 		goto out;
2030 
2031 	err = amdgpu_ucode_validate(adev->psp.asd_fw);
2032 	if (err)
2033 		goto out;
2034 
2035 	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
2036 	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
2037 	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
2038 	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
2039 	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
2040 				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
2041 	return 0;
2042 out:
	dev_err(adev->dev, "failed to initialize asd microcode\n");
2044 	release_firmware(adev->psp.asd_fw);
2045 	adev->psp.asd_fw = NULL;
2046 	return err;
2047 }
2048 
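/*
 * Request and validate the SOS firmware image ("amdgpu/<chip>_sos.bin").
 * The v1 header describes the SYS_DRV and SOS blobs; the v1.1 and v1.2
 * minor revisions additionally describe the TOC and/or KDB blobs, whose
 * sizes and start addresses are cached here for the later load sequence.
 */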
2049 int psp_init_sos_microcode(struct psp_context *psp,
2050 			   const char *chip_name)
2051 {
2052 	struct amdgpu_device *adev = psp->adev;
2053 	char fw_name[30];
2054 	const struct psp_firmware_header_v1_0 *sos_hdr;
2055 	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
2056 	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
2057 	int err = 0;
2058 
2059 	if (!chip_name) {
2060 		dev_err(adev->dev, "invalid chip name for sos microcode\n");
2061 		return -EINVAL;
2062 	}
2063 
2064 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
2065 	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
2066 	if (err)
2067 		goto out;
2068 
2069 	err = amdgpu_ucode_validate(adev->psp.sos_fw);
2070 	if (err)
2071 		goto out;
2072 
2073 	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
2074 	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
2075 
2076 	switch (sos_hdr->header.header_version_major) {
2077 	case 1:
2078 		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
2079 		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
2080 		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
2081 		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
2082 		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
2083 				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
2084 		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2085 				le32_to_cpu(sos_hdr->sos_offset_bytes);
2086 		if (sos_hdr->header.header_version_minor == 1) {
2087 			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
2088 			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
2089 			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2090 					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
2091 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
2092 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2093 					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
2094 		}
2095 		if (sos_hdr->header.header_version_minor == 2) {
2096 			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
2097 			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
2098 			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
2099 						    le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
2100 		}
2101 		break;
2102 	default:
2103 		dev_err(adev->dev,
2104 			"unsupported psp sos firmware\n");
2105 		err = -EINVAL;
2106 		goto out;
2107 	}
2108 
2109 	return 0;
2110 out:
2111 	dev_err(adev->dev,
2112 		"failed to init sos firmware\n");
2113 	release_firmware(adev->psp.sos_fw);
2114 	adev->psp.sos_fw = NULL;
2115 
2116 	return err;
2117 }
2118 
2119 static int psp_set_clockgating_state(void *handle,
2120 				     enum amd_clockgating_state state)
2121 {
2122 	return 0;
2123 }
2124 
2125 static int psp_set_powergating_state(void *handle,
2126 				     enum amd_powergating_state state)
2127 {
2128 	return 0;
2129 }
2130 
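/*
 * sysfs read handler for the "usbc_pd_fw" attribute: query the PSP for
 * the active USB-C PD firmware version and report it in hex.  Returns
 * -EBUSY until the PSP block has completed late init.
 */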
2131 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
2132 					 struct device_attribute *attr,
2133 					 char *buf)
2134 {
2135 	struct drm_device *ddev = dev_get_drvdata(dev);
2136 	struct amdgpu_device *adev = ddev->dev_private;
2137 	uint32_t fw_ver;
2138 	int ret;
2139 
2140 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet\n");
2142 		return -EBUSY;
2143 	}
2144 
2145 	mutex_lock(&adev->psp.mutex);
2146 	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
2147 	mutex_unlock(&adev->psp.mutex);
2148 
2149 	if (ret) {
		DRM_ERROR("Failed to read USBC PD FW, err = %d\n", ret);
2151 		return ret;
2152 	}
2153 
2154 	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
2155 }
2156 
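/*
 * sysfs write handler for the "usbc_pd_fw" attribute: the written string
 * names a firmware file under the "amdgpu/" firmware directory.  The
 * image is staged in a DMA-coherent buffer, since the PSP requires
 * physically contiguous memory, and then handed to the PSP with
 * psp_load_usbc_pd_fw().
 */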
2157 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
2158 						       struct device_attribute *attr,
2159 						       const char *buf,
2160 						       size_t count)
2161 {
2162 	struct drm_device *ddev = dev_get_drvdata(dev);
2163 	struct amdgpu_device *adev = ddev->dev_private;
2164 	void *cpu_addr;
2165 	dma_addr_t dma_addr;
2166 	int ret;
2167 	char fw_name[100];
2168 	const struct firmware *usbc_pd_fw;
2169 
2170 	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet\n");
2172 		return -EBUSY;
2173 	}
2174 
	/* Strip the trailing newline that a sysfs write usually carries */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%.*s", (int)strcspn(buf, "\n"), buf);
2176 	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
2177 	if (ret)
2178 		goto fail;
2179 
	/* We need contiguous physical mem to place the FW for psp to access */
	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
	if (!cpu_addr) {
		/* Nothing was mapped yet, just drop the firmware and bail */
		release_firmware(usbc_pd_fw);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dma_mapping_error(adev->dev, dma_addr);
	if (ret)
		goto rel_buf;
2186 
2187 	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
2188 
2189 	/*
2190 	 * x86 specific workaround.
2191 	 * Without it the buffer is invisible in PSP.
2192 	 *
2193 	 * TODO Remove once PSP starts snooping CPU cache
2194 	 */
2195 #ifdef CONFIG_X86
2196 	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
2197 #endif
2198 
2199 	mutex_lock(&adev->psp.mutex);
2200 	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
2201 	mutex_unlock(&adev->psp.mutex);
2202 
2203 rel_buf:
2204 	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
2205 	release_firmware(usbc_pd_fw);
2206 
2207 fail:
2208 	if (ret) {
		DRM_ERROR("Failed to load USBC PD FW, err = %d\n", ret);
2210 		return ret;
2211 	}
2212 
2213 	return count;
2214 }
2215 
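/*
 * Expose the two handlers above as a root-writable sysfs file.  Example
 * usage (the exact sysfs path is illustrative):
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo <firmware>.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */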
2216 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
2217 		   psp_usbc_pd_fw_sysfs_read,
2218 		   psp_usbc_pd_fw_sysfs_write);
2219 
2222 const struct amd_ip_funcs psp_ip_funcs = {
2223 	.name = "psp",
2224 	.early_init = psp_early_init,
2225 	.late_init = NULL,
2226 	.sw_init = psp_sw_init,
2227 	.sw_fini = psp_sw_fini,
2228 	.hw_init = psp_hw_init,
2229 	.hw_fini = psp_hw_fini,
2230 	.suspend = psp_suspend,
2231 	.resume = psp_resume,
2232 	.is_idle = NULL,
2233 	.check_soft_reset = NULL,
2234 	.wait_for_idle = NULL,
2235 	.soft_reset = NULL,
2236 	.set_clockgating_state = psp_set_clockgating_state,
2237 	.set_powergating_state = psp_set_powergating_state,
2238 };
2239 
2240 static int psp_sysfs_init(struct amdgpu_device *adev)
2241 {
2242 	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
2243 
2244 	if (ret)
		DRM_ERROR("Failed to create USBC PD FW control file!\n");
2246 
2247 	return ret;
2248 }
2249 
2250 static void psp_sysfs_fini(struct amdgpu_device *adev)
2251 {
2252 	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
2253 }
2254 
2255 const struct amdgpu_ip_block_version psp_v3_1_ip_block =
2256 {
2257 	.type = AMD_IP_BLOCK_TYPE_PSP,
2258 	.major = 3,
2259 	.minor = 1,
2260 	.rev = 0,
2261 	.funcs = &psp_ip_funcs,
2262 };
2263 
2264 const struct amdgpu_ip_block_version psp_v10_0_ip_block =
2265 {
2266 	.type = AMD_IP_BLOCK_TYPE_PSP,
2267 	.major = 10,
2268 	.minor = 0,
2269 	.rev = 0,
2270 	.funcs = &psp_ip_funcs,
2271 };
2272 
2273 const struct amdgpu_ip_block_version psp_v11_0_ip_block =
2274 {
2275 	.type = AMD_IP_BLOCK_TYPE_PSP,
2276 	.major = 11,
2277 	.minor = 0,
2278 	.rev = 0,
2279 	.funcs = &psp_ip_funcs,
2280 };
2281 
2282 const struct amdgpu_ip_block_version psp_v12_0_ip_block =
2283 {
2284 	.type = AMD_IP_BLOCK_TYPE_PSP,
2285 	.major = 12,
2286 	.minor = 0,
2287 	.rev = 0,
2288 	.funcs = &psp_ip_funcs,
2289 };
2290