/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"

static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
		  le32_to_cpu(hdr->ucode_array_offset_bytes));
	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}

void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("MC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct mc_firmware_header_v1_0 *mc_hdr =
			container_of(hdr, struct mc_firmware_header_v1_0, header);

		DRM_DEBUG("io_debug_size_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
	const struct smc_firmware_header_v1_0 *v1_0_hdr;
	const struct smc_firmware_header_v2_0 *v2_0_hdr;
	const struct smc_firmware_header_v2_1 *v2_1_hdr;

	DRM_DEBUG("SMC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		v1_0_hdr = container_of(hdr, struct smc_firmware_header_v1_0, header);
		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(v1_0_hdr->ucode_start_addr));
	} else if (version_major == 2) {
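		/*
		 * SMC v2.x headers describe the embedded PP table: v2.0
		 * carries a single table (offset/size), while v2.1 adds a
		 * table count and a per-entry offset for multiple tables.
		 */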
		switch (version_minor) {
		case 0:
			v2_0_hdr = container_of(hdr, struct smc_firmware_header_v2_0, v1_0.header);
			DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_offset_bytes));
			DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_size_bytes));
			break;
		case 1:
			v2_1_hdr = container_of(hdr, struct smc_firmware_header_v2_1, v1_0.header);
			DRM_DEBUG("pptable_count: %u\n", le32_to_cpu(v2_1_hdr->pptable_count));
			DRM_DEBUG("pptable_entry_offset: %u\n", le32_to_cpu(v2_1_hdr->pptable_entry_offset));
			break;
		default:
			break;
		}

	} else {
		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GFX\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gfx_firmware_header_v1_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
	} else if (version_major == 2) {
		const struct gfx_firmware_header_v2_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
	} else {
		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("IMU\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major != 1) {
		DRM_ERROR("Unknown IMU ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("RLC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct rlc_firmware_header_v1_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("master_pkt_description_offset: %u\n",
			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
	} else if (version_major == 2) {
		const struct rlc_firmware_header_v2_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("reg_restore_list_size: %u\n",
			  le32_to_cpu(rlc_hdr->reg_restore_list_size));
		DRM_DEBUG("reg_list_format_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_start));
		DRM_DEBUG("reg_list_format_separate_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
		DRM_DEBUG("starting_offsets_start: %u\n",
			  le32_to_cpu(rlc_hdr->starting_offsets_start));
		DRM_DEBUG("reg_list_format_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
		DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		DRM_DEBUG("reg_list_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
		DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
		DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
		if (version_minor == 1) {
			const struct rlc_firmware_header_v2_1 *v2_1 =
				container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
			DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
				  le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
			DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
			DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
			DRM_DEBUG("save_restore_list_cntl_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
			DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
			DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
			DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
			DRM_DEBUG("save_restore_list_gpm_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
			DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
			DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
			DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
			DRM_DEBUG("save_restore_list_srm_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
			DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
		}
	} else {
		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
	}
}

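/*
 * SDMA firmware layout depends on the header revision: v1.1 appends a
 * digest to the v1.0 fields, while v2.0 packs two ucode threads (TH0/TH1,
 * exposed as SDMA_CTX/SDMA_CTL elsewhere in this file) into one image
 * with separate jump tables.
 */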
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SDMA\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct sdma_firmware_header_v1_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ucode_change_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_change_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
		if (version_minor >= 1) {
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
		}
	} else if (version_major == 2) {
		const struct sdma_firmware_header_v2_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ctx_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_offset));
		DRM_DEBUG("ctx_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_size));
		DRM_DEBUG("ctl_ucode_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_ucode_offset));
		DRM_DEBUG("ctl_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_offset));
		DRM_DEBUG("ctl_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_size));
	} else {
		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}

void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
	uint32_t fw_index;
	const struct psp_fw_bin_desc *desc;

	DRM_DEBUG("PSP\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct psp_firmware_header_v1_0 *psp_hdr =
			container_of(hdr, struct psp_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(psp_hdr->sos.fw_version));
		DRM_DEBUG("sos_offset_bytes: %u\n",
			  le32_to_cpu(psp_hdr->sos.offset_bytes));
		DRM_DEBUG("sos_size_bytes: %u\n",
			  le32_to_cpu(psp_hdr->sos.size_bytes));
		if (version_minor == 1) {
			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
			DRM_DEBUG("toc_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc.fw_version));
			DRM_DEBUG("toc_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes));
			DRM_DEBUG("toc_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc.size_bytes));
			DRM_DEBUG("kdb_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb.fw_version));
			DRM_DEBUG("kdb_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes));
			DRM_DEBUG("kdb_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes));
		}
		if (version_minor == 2) {
			const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
				container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
			DRM_DEBUG("kdb_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb.fw_version));
			DRM_DEBUG("kdb_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes));
			DRM_DEBUG("kdb_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes));
		}
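		/*
		 * A v1.3 header embeds the whole v1.1 layout (TOC and KDB
		 * descriptors) and adds an SPL descriptor on top.
		 */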
		if (version_minor == 3) {
			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
			const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 =
				container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1);
			DRM_DEBUG("toc_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version));
			DRM_DEBUG("toc_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes));
			DRM_DEBUG("toc_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes));
			DRM_DEBUG("kdb_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version));
			DRM_DEBUG("kdb_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes));
			DRM_DEBUG("kdb_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes));
			DRM_DEBUG("spl_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->spl.fw_version));
			DRM_DEBUG("spl_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes));
			DRM_DEBUG("spl_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_3->spl.size_bytes));
		}
	} else if (version_major == 2) {
		const struct psp_firmware_header_v2_0 *psp_hdr_v2_0 =
			container_of(hdr, struct psp_firmware_header_v2_0, header);
		for (fw_index = 0; fw_index < le32_to_cpu(psp_hdr_v2_0->psp_fw_bin_count); fw_index++) {
			desc = &(psp_hdr_v2_0->psp_fw_bin[fw_index]);
			switch (desc->fw_type) {
			case PSP_FW_TYPE_PSP_SOS:
				DRM_DEBUG("psp_sos_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_sos_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_SYS_DRV:
				DRM_DEBUG("psp_sys_drv_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_sys_drv_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_KDB:
				DRM_DEBUG("psp_kdb_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_kdb_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_TOC:
				DRM_DEBUG("psp_toc_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_toc_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_SPL:
				DRM_DEBUG("psp_spl_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_spl_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_RL:
				DRM_DEBUG("psp_rl_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_rl_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_SOC_DRV:
				DRM_DEBUG("psp_soc_drv_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_soc_drv_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_INTF_DRV:
				DRM_DEBUG("psp_intf_drv_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_intf_drv_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			case PSP_FW_TYPE_PSP_DBG_DRV:
				DRM_DEBUG("psp_dbg_drv_version: %u\n",
					  le32_to_cpu(desc->fw_version));
				DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n",
					  le32_to_cpu(desc->size_bytes));
				break;
			default:
				DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type);
				break;
			}
		}
	} else {
		DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}

void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GPU_INFO\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);

		DRM_DEBUG("version_major: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_major));
		DRM_DEBUG("version_minor: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_minor));
	} else {
		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
	}
}

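/*
 * Basic sanity check on a loaded firmware image: the size recorded in the
 * common header must match the size of the blob handed back by
 * request_firmware(), otherwise the file is truncated or mismatched.
 */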
int amdgpu_ucode_validate(const struct firmware *fw)
{
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	if (fw->size == le32_to_cpu(hdr->size_bytes))
		return 0;

	return -EINVAL;
}

bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
			      uint16_t hdr_major, uint16_t hdr_minor)
{
	if ((hdr->common.header_version_major == hdr_major) &&
	    (hdr->common.header_version_minor == hdr_minor))
		return true;
	return false;
}

enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		return AMDGPU_FW_LOAD_SMU;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_ALDEBARAN:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
	case CHIP_CYAN_SKILLFISH:
		if (!(load_type &&
		      adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2))
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
	default:
		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
	}
}

const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
{
	switch (ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
		return "SDMA0";
	case AMDGPU_UCODE_ID_SDMA1:
		return "SDMA1";
	case AMDGPU_UCODE_ID_SDMA2:
		return "SDMA2";
	case AMDGPU_UCODE_ID_SDMA3:
		return "SDMA3";
	case AMDGPU_UCODE_ID_SDMA4:
		return "SDMA4";
	case AMDGPU_UCODE_ID_SDMA5:
		return "SDMA5";
	case AMDGPU_UCODE_ID_SDMA6:
		return "SDMA6";
	case AMDGPU_UCODE_ID_SDMA7:
		return "SDMA7";
	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
		return "SDMA_CTX";
	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
		return "SDMA_CTL";
	case AMDGPU_UCODE_ID_CP_CE:
		return "CP_CE";
	case AMDGPU_UCODE_ID_CP_PFP:
		return "CP_PFP";
	case AMDGPU_UCODE_ID_CP_ME:
		return "CP_ME";
	case AMDGPU_UCODE_ID_CP_MEC1:
		return "CP_MEC1";
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		return "CP_MEC1_JT";
	case AMDGPU_UCODE_ID_CP_MEC2:
		return "CP_MEC2";
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		return "CP_MEC2_JT";
	case AMDGPU_UCODE_ID_CP_MES:
		return "CP_MES";
	case AMDGPU_UCODE_ID_CP_MES_DATA:
		return "CP_MES_DATA";
	case AMDGPU_UCODE_ID_CP_MES1:
		return "CP_MES_KIQ";
	case AMDGPU_UCODE_ID_CP_MES1_DATA:
		return "CP_MES_KIQ_DATA";
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		return "RLC_RESTORE_LIST_CNTL";
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		return "RLC_RESTORE_LIST_GPM_MEM";
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		return "RLC_RESTORE_LIST_SRM_MEM";
	case AMDGPU_UCODE_ID_RLC_IRAM:
		return "RLC_IRAM";
	case AMDGPU_UCODE_ID_RLC_DRAM:
		return "RLC_DRAM";
	case AMDGPU_UCODE_ID_RLC_G:
		return "RLC_G";
	case AMDGPU_UCODE_ID_RLC_P:
		return "RLC_P";
	case AMDGPU_UCODE_ID_RLC_V:
		return "RLC_V";
	case AMDGPU_UCODE_ID_IMU_I:
		return "IMU_I";
	case AMDGPU_UCODE_ID_IMU_D:
		return "IMU_D";
	case AMDGPU_UCODE_ID_STORAGE:
		return "STORAGE";
	case AMDGPU_UCODE_ID_SMC:
		return "SMC";
	case AMDGPU_UCODE_ID_PPTABLE:
		return "PPTABLE";
	case AMDGPU_UCODE_ID_UVD:
		return "UVD";
	case AMDGPU_UCODE_ID_UVD1:
		return "UVD1";
	case AMDGPU_UCODE_ID_VCE:
		return "VCE";
	case AMDGPU_UCODE_ID_VCN:
		return "VCN";
	case AMDGPU_UCODE_ID_VCN1:
		return "VCN1";
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		return "DMCU_ERAM";
	case AMDGPU_UCODE_ID_DMCU_INTV:
		return "DMCU_INTV";
	case AMDGPU_UCODE_ID_VCN0_RAM:
		return "VCN0_RAM";
	case AMDGPU_UCODE_ID_VCN1_RAM:
		return "VCN1_RAM";
	case AMDGPU_UCODE_ID_DMCUB:
		return "DMCUB";
	default:
		return "UNKNOWN UCODE";
	}
}

#define FW_VERSION_ATTR(name, mode, field)				\
static ssize_t show_##name(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct drm_device *ddev = dev_get_drvdata(dev);			\
	struct amdgpu_device *adev = drm_to_adev(ddev);			\
									\
	return sysfs_emit(buf, "0x%08x\n", adev->field);		\
}									\
static DEVICE_ATTR(name, mode, show_##name, NULL)

FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);

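/*
 * All of the firmware version attributes above are grouped into the
 * "fw_version" sysfs directory created by amdgpu_ucode_sysfs_init().
 */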
static struct attribute *fw_attrs[] = {
	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
	&dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
	&dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
	&dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
	&dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
	&dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
	&dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
	&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
	&dev_attr_dmcu_fw_version.attr, NULL
};

static const struct attribute_group fw_attr_group = {
	.name = "fw_version",
	.attrs = fw_attrs
};

int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
{
	return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
}

void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
{
	sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
}

static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
				       struct amdgpu_firmware_info *ucode,
				       uint64_t mc_addr, void *kptr)
{
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr = NULL;
	const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
	const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
	const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
	const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
	const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
	u8 *ucode_addr;

	if (!ucode->fw)
		return 0;

	ucode->mc_addr = mc_addr;
	ucode->kaddr = kptr;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
		return 0;

	header = (const struct common_firmware_header *)ucode->fw->data;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)ucode->fw->data;
	dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
	dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
	mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
	imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		switch (ucode->ucode_id) {
		case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
			ucode->ucode_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
			ucode->ucode_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(sdma_hdr->ctl_ucode_offset);
			break;
		case AMDGPU_UCODE_ID_CP_MEC1:
		case AMDGPU_UCODE_ID_CP_MEC2:
			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
				le32_to_cpu(cp_hdr->jt_size) * 4;
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_MEC1_JT:
		case AMDGPU_UCODE_ID_CP_MEC2_JT:
			ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes) +
				le32_to_cpu(cp_hdr->jt_offset) * 4;
			break;
		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
			ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
			ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
			break;
		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
			ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
			ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
			break;
		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
			ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
			ucode_addr = adev->gfx.rlc.save_restore_list_srm;
			break;
		case AMDGPU_UCODE_ID_RLC_IRAM:
			ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
			ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
			break;
		case AMDGPU_UCODE_ID_RLC_DRAM:
			ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
			ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
			break;
		case AMDGPU_UCODE_ID_RLC_P:
			ucode->ucode_size = adev->gfx.rlc.rlcp_ucode_size_bytes;
			ucode_addr = adev->gfx.rlc.rlcp_ucode;
			break;
		case AMDGPU_UCODE_ID_RLC_V:
			ucode->ucode_size = adev->gfx.rlc.rlcv_ucode_size_bytes;
			ucode_addr = adev->gfx.rlc.rlcv_ucode;
			break;
		case AMDGPU_UCODE_ID_CP_MES:
			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_MES_DATA:
			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_MES1:
			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_MES1_DATA:
			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_DMCU_ERAM:
			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
				le32_to_cpu(dmcu_hdr->intv_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_DMCU_INTV:
			ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes) +
				le32_to_cpu(dmcu_hdr->intv_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_DMCUB:
			ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_PPTABLE:
			ucode->ucode_size = ucode->fw->size;
			ucode_addr = (u8 *)ucode->fw->data;
			break;
		case AMDGPU_UCODE_ID_IMU_I:
			ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_IMU_D:
			ucode->ucode_size = le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes) +
				le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
			break;
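		/*
		 * RS64 CP firmwares use the v2_0 gfx header: the instruction
		 * image is described by ucode_size/offset, while the per-pipe
		 * stack (Pn_STACK) entries reuse the data_size/offset fields.
		 */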
		case AMDGPU_UCODE_ID_CP_RS64_PFP:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_ME:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_MEC:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(cpv2_hdr->data_offset_bytes);
			break;
		default:
			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
			ucode_addr = (u8 *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes);
			break;
		}
	} else {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
		ucode_addr = (u8 *)ucode->fw->data +
			le32_to_cpu(header->ucode_array_offset_bytes);
	}

	memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);

	return 0;
}

static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
				 uint64_t mc_addr, void *kptr)
{
	const struct gfx_firmware_header_v1_0 *header = NULL;
	const struct common_firmware_header *comm_hdr = NULL;
	uint8_t *src_addr = NULL;
	uint8_t *dst_addr = NULL;

	if (!ucode->fw)
		return 0;

	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	dst_addr = ucode->kaddr +
		   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
			 PAGE_SIZE);
	src_addr = (uint8_t *)ucode->fw->data +
		   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
		   (le32_to_cpu(header->jt_offset) * 4);
	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);

	return 0;
}

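/*
 * For PSP/SMU front-door loading, a single kernel BO backs all staged
 * ucode images: VRAM for SR-IOV VFs (which also zero it up front), GTT
 * otherwise.  Direct (back-door) loading skips the allocation entirely.
 */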
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
					amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
					&adev->firmware.fw_buf,
					&adev->firmware.fw_buf_mc,
					&adev->firmware.fw_buf_ptr);
		if (!adev->firmware.fw_buf) {
			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
			return -ENOMEM;
		} else if (amdgpu_sriov_vf(adev)) {
			memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
		}
	}
	return 0;
}

void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
			      &adev->firmware.fw_buf_mc,
			      &adev->firmware.fw_buf_ptr);
}

int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
	uint64_t fw_offset = 0;
	int i;
	struct amdgpu_firmware_info *ucode = NULL;

	/*
	 * For bare metal, the ucode is allocated in GTT, so there is no need
	 * to refill the BO on reset/suspend.
	 */
	if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
		return 0;
	/*
	 * If the SMU loads the firmware, there is no need to add the SMC,
	 * UVD and VCE ucode entries here.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_sriov_vf(adev))
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
		else
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
	} else {
		adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
	}

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
						    adev->firmware.fw_buf_ptr + fw_offset);
			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				const struct gfx_firmware_header_v1_0 *cp_hdr;

				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
				amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
						      adev->firmware.fw_buf_ptr + fw_offset);
				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
			}
			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
		}
	}
	return 0;
}

void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
	int maj, min, rev;
	char *ip_name;
	uint32_t version = adev->ip_versions[block_type][0];

	switch (block_type) {
	case GC_HWIP:
		ip_name = "gc";
		break;
	case SDMA0_HWIP:
		ip_name = "sdma";
		break;
	case MP0_HWIP:
		ip_name = "psp";
		break;
	case MP1_HWIP:
		ip_name = "smu";
		break;
	case UVD_HWIP:
		ip_name = "vcn";
		break;
	default:
		BUG();
	}

	maj = IP_VERSION_MAJ(version);
	min = IP_VERSION_MIN(version);
	rev = IP_VERSION_REV(version);

	snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}