/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0.h"

#include "mp/mp_11_0_offset.h"
#include "mp/mp_11_0_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"

#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
MODULE_FIRMWARE("amdgpu/navi10_ta.bin");
MODULE_FIRMWARE("amdgpu/navi14_sos.bin");
MODULE_FIRMWARE("amdgpu/navi14_asd.bin");
MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");

/* address block */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
/* navi10 reg offset define */
#define mmRLC_GPM_UCODE_ADDR_NV10	0x5b61
#define mmRLC_GPM_UCODE_DATA_NV10	0x5b62
#define mmSDMA0_UCODE_ADDR_NV10		0x5880
#define mmSDMA0_UCODE_DATA_NV10		0x5881
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US	3000000

static int psp_v11_0_init_microcode(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct psp_firmware_header_v1_0 *sos_hdr;
	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
	const struct psp_firmware_header_v1_0 *asd_hdr;
	const struct ta_firmware_header_v1_0 *ta_hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.sos_fw);
	if (err)
		goto out;

	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);

	switch (sos_hdr->header.header_version_major) {
	case 1:
		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
				le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr->sos_offset_bytes);
		if (sos_hdr->header.header_version_minor == 1) {
			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
					le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
					le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
		}
		if (sos_hdr->header.header_version_minor == 2) {
			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
					le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unsupported psp sos firmware\n");
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
	if (err)
		goto out1;

	err = amdgpu_ucode_validate(adev->psp.asd_fw);
	if (err)
		goto out1;

	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
				le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
		err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
		if (err) {
			release_firmware(adev->psp.ta_fw);
			adev->psp.ta_fw = NULL;
			dev_info(adev->dev,
				 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
		} else {
			err = amdgpu_ucode_validate(adev->psp.ta_fw);
			if (err)
				goto out2;

			ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
			adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
			adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
			adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
				le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
			adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
			adev->psp.ta_ras_ucode_version =
				le32_to_cpu(ta_hdr->ta_ras_ucode_version);
			adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
			adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
				le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
		err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
		if (err) {
			release_firmware(adev->psp.ta_fw);
			adev->psp.ta_fw = NULL;
			dev_info(adev->dev,
				 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
		} else {
			err = amdgpu_ucode_validate(adev->psp.ta_fw);
			if (err)
				goto out2;

			ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
			adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
			adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
			adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
				le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

			adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);

			adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
			adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
			adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
				le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
		}
		break;
	default:
		BUG();
	}

	return 0;

out2:
	release_firmware(adev->psp.ta_fw);
	adev->psp.ta_fw = NULL;
out1:
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
out:
	dev_err(adev->dev,
		"psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;

	return err;
}

int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	int ret;
	int retry_loop;

	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
		/* Wait for the bootloader to signify that it is ready by
		 * setting bit 31 of C2PMSG_35 to 1.
		 */
		ret = psp_wait_for(psp,
				   SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
				   0x80000000,
				   0x80000000,
				   false);

		if (ret == 0)
			return 0;
	}

	return ret;
}

static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t sol_reg;

	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);

	return sol_reg != 0x0;
}

static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
	int ret;
	uint32_t psp_gfxdrv_command_reg = 0;
	struct amdgpu_device *adev = psp->adev;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	if (psp_v11_0_is_sos_alive(psp)) {
		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
		dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
		return 0;
	}

	ret = psp_v11_0_wait_for_bootloader(psp);
	if (ret)
		return ret;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);

	/* Copy PSP KDB binary to memory */
	memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);

	/* Provide the PSP KDB to bootloader */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
		     (uint32_t)(psp->fw_pri_mc_addr >> 20));
	psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
		     psp_gfxdrv_command_reg);

	ret = psp_v11_0_wait_for_bootloader(psp);

	return ret;
}

static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
{
	int ret;
	uint32_t psp_gfxdrv_command_reg = 0;
	struct amdgpu_device *adev = psp->adev;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	if (psp_v11_0_is_sos_alive(psp)) {
		psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
		dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
		return 0;
	}

	ret = psp_v11_0_wait_for_bootloader(psp);
	if (ret)
		return ret;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);

	/* Copy PSP System Driver binary to memory */
	memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);

	/* Provide the sys driver to bootloader */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
		     (uint32_t)(psp->fw_pri_mc_addr >> 20));
	psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
		     psp_gfxdrv_command_reg);

	/* there might be handshake issue with hardware which needs delay */
	mdelay(20);

	ret = psp_v11_0_wait_for_bootloader(psp);

	return ret;
}

static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
{
	int ret;
	unsigned int psp_gfxdrv_command_reg = 0;
	struct amdgpu_device *adev = psp->adev;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	if (psp_v11_0_is_sos_alive(psp))
		return 0;

	ret = psp_v11_0_wait_for_bootloader(psp);
	if (ret)
		return ret;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);

	/* Copy Secure OS binary to PSP memory */
	memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);

	/* Provide the PSP secure OS to bootloader */
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
		     (uint32_t)(psp->fw_pri_mc_addr >> 20));
	psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35,
		     psp_gfxdrv_command_reg);

	/* there might be handshake issue with hardware which needs delay */
	mdelay(20);
	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
			   0, true);

	return ret;
}

static void psp_v11_0_reroute_ih(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t tmp;

	/* Change IH ring for VMC */
	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);

	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);

	mdelay(20);
	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
		     0x80000000, 0x8000FFFF, false);

	/* Change IH ring for UMC */
	tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
	tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);

	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);

	mdelay(20);
	psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
		     0x80000000, 0x8000FFFF, false);
}

static int psp_v11_0_ring_init(struct psp_context *psp,
			       enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring;
	struct amdgpu_device *adev = psp->adev;

	psp_v11_0_reroute_ih(psp);

	ring = &psp->km_ring;

	ring->ring_type = ring_type;

	/* allocate 4k Page of Local Frame Buffer memory for ring */
	ring->ring_size = 0x1000;
	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->firmware.rbuf,
				      &ring->ring_mem_mc_addr,
				      (void **)&ring->ring_mem);
	if (ret) {
		ring->ring_size = 0;
		return ret;
	}

	return 0;
}

static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
{
	if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
		return true;
	return false;
}

static int psp_v11_0_ring_stop(struct psp_context *psp,
			       enum psp_ring_type ring_type)
{
	int ret = 0;
	struct amdgpu_device *adev = psp->adev;

	/* Write the ring destroy command */
	if (psp_v11_0_support_vmr_ring(psp))
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
			     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
	else
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
			     GFX_CTRL_CMD_ID_DESTROY_RINGS);

	/* there might be handshake issue with hardware which needs delay */
	mdelay(20);

	/* Wait for response flag (bit 31) */
	if (psp_v11_0_support_vmr_ring(psp))
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
				   0x80000000, 0x80000000, false);
	else
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
				   0x80000000, 0x80000000, false);

	return ret;
}

static int psp_v11_0_ring_create(struct psp_context *psp,
				 enum psp_ring_type ring_type)
{
	int ret = 0;
	unsigned int psp_ring_reg = 0;
	struct psp_ring *ring = &psp->km_ring;
	struct amdgpu_device *adev = psp->adev;

	if (psp_v11_0_support_vmr_ring(psp)) {
		ret = psp_v11_0_ring_stop(psp, ring_type);
		if (ret) {
			DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
			return ret;
		}

		/* Write low address of the ring to C2PMSG_102 */
		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
		/* Write high address of the ring to C2PMSG_103 */
		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);

		/* Write the ring initialization command to C2PMSG_101 */
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
			     GFX_CTRL_CMD_ID_INIT_GPCOM_RING);

		/* there might be handshake issue with hardware which needs delay */
		mdelay(20);

		/* Wait for response flag (bit 31) in C2PMSG_101 */
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
				   0x80000000, 0x8000FFFF, false);

	} else {
		/* Wait for sOS ready for ring creation */
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
				   0x80000000, 0x80000000, false);
		if (ret) {
			DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
			return ret;
		}

		/* Write low address of the ring to C2PMSG_69 */
		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
		/* Write high address of the ring to C2PMSG_70 */
		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
		/* Write size of ring to C2PMSG_71 */
		psp_ring_reg = ring->ring_size;
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
		/* Write the ring initialization command to C2PMSG_64 */
		psp_ring_reg = ring_type;
		psp_ring_reg = psp_ring_reg << 16;
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);

		/* there might be handshake issue with hardware which needs delay */
		mdelay(20);

		/* Wait for response flag (bit 31) in C2PMSG_64 */
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
				   0x80000000, 0x8000FFFF, false);
	}

	return ret;
}

static int psp_v11_0_ring_destroy(struct psp_context *psp,
				  enum psp_ring_type ring_type)
{
	int ret = 0;
	struct psp_ring *ring = &psp->km_ring;
	struct amdgpu_device *adev = psp->adev;

	ret = psp_v11_0_ring_stop(psp, ring_type);
	if (ret)
		DRM_ERROR("Failed to stop psp ring\n");

	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
			      &ring->ring_mem_mc_addr,
			      (void **)&ring->ring_mem);

	return ret;
}

static int
psp_v11_0_sram_map(struct amdgpu_device *adev,
		   unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
		   unsigned int *sram_data_reg_offset,
		   enum AMDGPU_UCODE_ID ucode_id)
{
	int ret = 0;

	switch (ucode_id) {
/* TODO: needs to confirm */
#if 0
	case AMDGPU_UCODE_ID_SMC:
		*sram_offset = 0;
		*sram_addr_reg_offset = 0;
		*sram_data_reg_offset = 0;
		break;
#endif

	case AMDGPU_UCODE_ID_CP_CE:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_PFP:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_ME:
		*sram_offset = 0x0;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_MEC1:
		*sram_offset = 0x10000;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_CP_MEC2:
		*sram_offset = 0x10000;
		*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
		*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
		break;

	case AMDGPU_UCODE_ID_RLC_G:
		*sram_offset = 0x2000;
		if (adev->asic_type < CHIP_NAVI10) {
			*sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
			*sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
		} else {
			*sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
			*sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
		}
		break;

	case AMDGPU_UCODE_ID_SDMA0:
		*sram_offset = 0x0;
		if (adev->asic_type < CHIP_NAVI10) {
			*sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
			*sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
		} else {
			*sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
			*sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
		}
		break;

/* TODO: needs to confirm */
#if 0
	case AMDGPU_UCODE_ID_SDMA1:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;

	case AMDGPU_UCODE_ID_UVD:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;

	case AMDGPU_UCODE_ID_VCE:
		*sram_offset = ;
		*sram_addr_reg_offset = ;
		break;
#endif

	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
					struct amdgpu_firmware_info *ucode,
					enum AMDGPU_UCODE_ID ucode_type)
{
	int err = 0;
	unsigned int fw_sram_reg_val = 0;
	unsigned int fw_sram_addr_reg_offset = 0;
	unsigned int fw_sram_data_reg_offset = 0;
	unsigned int ucode_size;
	uint32_t *ucode_mem = NULL;
	struct amdgpu_device *adev = psp->adev;

	err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
				 &fw_sram_data_reg_offset, ucode_type);
	if (err)
		return false;

	WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);

	ucode_size = ucode->ucode_size;
	ucode_mem = (uint32_t *)ucode->kaddr;
	while (ucode_size) {
		fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);

		if (*ucode_mem != fw_sram_reg_val)
			return false;

		ucode_mem++;
		/* 4 bytes */
		ucode_size -= 4;
	}

	return true;
}

static int psp_v11_0_mode1_reset(struct psp_context *psp)
{
	int ret;
	uint32_t offset;
	struct amdgpu_device *adev = psp->adev;

	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);

	ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);

	if (ret) {
		DRM_INFO("psp is not working correctly before mode1 reset!\n");
		return -EINVAL;
	}

	/* send the mode 1 reset command */
	WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);

	msleep(500);

	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);

	ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);

	if (ret) {
		DRM_INFO("psp mode 1 reset failed!\n");
		return -EINVAL;
	}

	DRM_INFO("psp mode1 reset succeeded\n");

	return 0;
}

/* TODO: Fill in the following functions once the PSP firmware interface for XGMI
 * is ready. For now, return success and hack the hive_id so high level code can
 * start testing.
 */
static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
	int number_devices, struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
	}

	return 0;
}

static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
	int number_devices, struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}

static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
		struct ta_ras_trigger_error_input *info)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
}

static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
{
#if 0
	/* not supported yet */
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
	ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
#else
	return -EINVAL;
#endif
}

static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
{
	return psp_rlc_autoload_start(psp);
}

static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
	int ret;
	int i;
	uint32_t data_32;
	int max_wait;
	struct amdgpu_device *adev = psp->adev;

	data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);

	max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
	for (i = 0; i < max_wait; i++) {
		ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
				   0x80000000, 0x80000000, false);
		if (ret == 0)
			break;
	}
	if (i < max_wait)
		ret = 0;
	else
		ret = -ETIME;

	DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
		  (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
		  (ret == 0) ? "succeed" : "failed",
		  i, adev->usec_timeout/1000);
	return ret;
}

static void psp_v11_0_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_v11_0_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		DRM_DEBUG("memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_v11_0_memory_training_fini(psp);
	return ret;
}

/*
 * save and restore process
 */
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
	uint32_t *pcache = (uint32_t *)ctx->sys_cache;
	struct amdgpu_device *adev = psp->adev;
	uint32_t p2c_header[4];
	uint32_t sz;
	void *buf;
	int ret;

	if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
		DRM_DEBUG("Memory training is not supported.\n");
		return 0;
	} else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
		DRM_ERROR("Memory training initialization failure.\n");
		return -EINVAL;
	}

	if (psp_v11_0_is_sos_alive(psp)) {
		DRM_DEBUG("SOS is alive, skip memory training.\n");
		return 0;
	}

	amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
	DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] "
		  "p2c_header[%08x,%08x,%08x,%08x]\n",
		  pcache[0], pcache[1], pcache[2], pcache[3],
		  p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);

	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
		DRM_DEBUG("Short training depends on restore.\n");
		ops |= PSP_MEM_TRAIN_RESTORE;
	}

	if ((ops & PSP_MEM_TRAIN_RESTORE) &&
	    pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
		DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
	    !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
	      pcache[3] == p2c_header[3])) {
		DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	if ((ops & PSP_MEM_TRAIN_SAVE) &&
	    p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
		DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
		ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
	}

	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
		ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
		ops |= PSP_MEM_TRAIN_SAVE;
	}

	DRM_DEBUG("Memory training ops:%x.\n", ops);

	if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
		/*
		 * Long training will encroach upon a certain amount of bottom
		 * VRAM, so save the content of this bottom VRAM to system
		 * memory before training, and restore it after training to
		 * avoid VRAM corruption.
		 */
		sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;

		if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) {
			DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n",
				  adev->gmc.visible_vram_size,
				  adev->mman.aper_base_kaddr);
			return -EINVAL;
		}

		buf = vmalloc(sz);
		if (!buf) {
			DRM_ERROR("failed to allocate system memory.\n");
			return -ENOMEM;
		}

		memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
		ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
		if (ret) {
			DRM_ERROR("Send long training msg failed.\n");
			vfree(buf);
			return ret;
		}

		memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
		adev->nbio.funcs->hdp_flush(adev, NULL);
		vfree(buf);
	}

	if (ops & PSP_MEM_TRAIN_SAVE) {
		amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
	}

	if (ops & PSP_MEM_TRAIN_RESTORE) {
		amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
	}

	if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
		ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
			 PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
		if (ret) {
			DRM_ERROR("send training msg failed.\n");
			return ret;
		}
	}
	ctx->training_cnt++;
	return 0;
}

static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
{
	uint32_t data;
	struct amdgpu_device *adev = psp->adev;

	if (psp_v11_0_support_vmr_ring(psp))
		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
	else
		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);

	return data;
}

static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
	struct amdgpu_device *adev = psp->adev;

	if (psp_v11_0_support_vmr_ring(psp)) {
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
	} else
		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}

static const struct psp_funcs psp_v11_0_funcs = {
	.init_microcode = psp_v11_0_init_microcode,
	.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
	.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
	.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
	.ring_init = psp_v11_0_ring_init,
	.ring_create = psp_v11_0_ring_create,
	.ring_stop = psp_v11_0_ring_stop,
	.ring_destroy = psp_v11_0_ring_destroy,
	.compare_sram_data = psp_v11_0_compare_sram_data,
	.mode1_reset = psp_v11_0_mode1_reset,
	.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
	.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
	.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
	.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
	.support_vmr_ring = psp_v11_0_support_vmr_ring,
	.ras_trigger_error = psp_v11_0_ras_trigger_error,
	.ras_cure_posion = psp_v11_0_ras_cure_posion,
	.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
	.mem_training_init = psp_v11_0_memory_training_init,
	.mem_training_fini = psp_v11_0_memory_training_fini,
	.mem_training = psp_v11_0_memory_training,
	.ring_get_wptr = psp_v11_0_ring_get_wptr,
	.ring_set_wptr = psp_v11_0_ring_set_wptr,
};

void psp_v11_0_set_psp_funcs(struct psp_context *psp)
{
	psp->funcs = &psp_v11_0_funcs;
}