/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put the RLC into safe mode if it is enabled and not already in safe mode.
36 */ 37 void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) 38 { 39 if (adev->gfx.rlc.in_safe_mode) 40 return; 41 42 /* if RLC is not enabled, do nothing */ 43 if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) 44 return; 45 46 if (adev->cg_flags & 47 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | 48 AMD_CG_SUPPORT_GFX_3D_CGCG)) { 49 adev->gfx.rlc.funcs->set_safe_mode(adev); 50 adev->gfx.rlc.in_safe_mode = true; 51 } 52 } 53 54 /** 55 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode 56 * 57 * @adev: amdgpu_device pointer 58 * 59 * Set RLC exit safe mode if RLC is enabled and have entered into safe mode. 60 */ 61 void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) 62 { 63 if (!(adev->gfx.rlc.in_safe_mode)) 64 return; 65 66 /* if RLC is not enabled, do nothing */ 67 if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) 68 return; 69 70 if (adev->cg_flags & 71 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | 72 AMD_CG_SUPPORT_GFX_3D_CGCG)) { 73 adev->gfx.rlc.funcs->unset_safe_mode(adev); 74 adev->gfx.rlc.in_safe_mode = false; 75 } 76 } 77 78 /** 79 * amdgpu_gfx_rlc_init_sr - Init save restore block 80 * 81 * @adev: amdgpu_device pointer 82 * @dws: the size of save restore block 83 * 84 * Allocate and setup value to save restore block of rlc. 85 * Returns 0 on succeess or negative error code if allocate failed. 
86 */ 87 int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) 88 { 89 const u32 *src_ptr; 90 volatile u32 *dst_ptr; 91 u32 i; 92 int r; 93 94 /* allocate save restore block */ 95 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, 96 AMDGPU_GEM_DOMAIN_VRAM, 97 &adev->gfx.rlc.save_restore_obj, 98 &adev->gfx.rlc.save_restore_gpu_addr, 99 (void **)&adev->gfx.rlc.sr_ptr); 100 if (r) { 101 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); 102 amdgpu_gfx_rlc_fini(adev); 103 return r; 104 } 105 106 /* write the sr buffer */ 107 src_ptr = adev->gfx.rlc.reg_list; 108 dst_ptr = adev->gfx.rlc.sr_ptr; 109 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) 110 dst_ptr[i] = cpu_to_le32(src_ptr[i]); 111 amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); 112 amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); 113 114 return 0; 115 } 116 117 /** 118 * amdgpu_gfx_rlc_init_csb - Init clear state block 119 * 120 * @adev: amdgpu_device pointer 121 * 122 * Allocate and setup value to clear state block of rlc. 123 * Returns 0 on succeess or negative error code if allocate failed. 124 */ 125 int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) 126 { 127 u32 dws; 128 int r; 129 130 /* allocate clear state block */ 131 adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); 132 r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, 133 AMDGPU_GEM_DOMAIN_VRAM, 134 &adev->gfx.rlc.clear_state_obj, 135 &adev->gfx.rlc.clear_state_gpu_addr, 136 (void **)&adev->gfx.rlc.cs_ptr); 137 if (r) { 138 dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); 139 amdgpu_gfx_rlc_fini(adev); 140 return r; 141 } 142 143 return 0; 144 } 145 146 /** 147 * amdgpu_gfx_rlc_init_cpt - Init cp table 148 * 149 * @adev: amdgpu_device pointer 150 * 151 * Allocate and setup value to cp table of rlc. 152 * Returns 0 on succeess or negative error code if allocate failed. 
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	/* allocate the cp table; the BO is left reserved and CPU-mapped
	 * so amdgpu_gfx_rlc_setup_cp_table() can write into it below */
	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}

/**
 * amdgpu_gfx_rlc_setup_cp_table - setup the buffer of cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Write cp firmware data into cp table: the jump table of each CP firmware
 * image (CE, PFP, ME, MEC and, when the callback reports five tables, MEC2)
 * is copied back to back into the CPU-mapped cp table BO.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;		/* running dword offset into the cp table BO */
	u32 table_offset, table_size;	/* jump table location/size inside each fw image, in dwords */

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		/*
		 * me selects the firmware image: 0=CE, 1=PFP, 2=ME, 3=MEC,
		 * 4=MEC2.  NOTE(review): if get_cp_table_num() ever returned
		 * more than 5, iterations with me > 4 would silently reuse
		 * the MEC2 values from the previous pass — confirm callers
		 * never exceed 5.
		 */
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		/* copy this jump table right after the previous one */
		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
 * amdgpu_gfx_rlc_fini - Free BOs used for RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the rlc_save_restore_block,
 * rlc_clear_state_block and rlc_jump_table_block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

/*
 * Parse the v2.0 RLC firmware header: cache the header fields in
 * adev->gfx.rlc, build the combined register_list_format/register_restore
 * array from the firmware payload and, for PSP front-door loading, register
 * the RLC_G ucode.  Returns 0 on success or -ENOMEM.
 */
static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
{
	const struct common_firmware_header *common_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	struct amdgpu_firmware_info *info;
	unsigned int *tmp;
	unsigned int i;

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	/* one allocation backs both arrays; register_restore points inside it */
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): the loop bounds below read the __le32 header fields
	 * directly without le32_to_cpu() (compare the converted copies cached
	 * above) — harmless on little-endian, worth confirming for BE builds.
	 */
	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	/* register_restore starts right after the format array */
	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			common_hdr = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
		}
	}

	return 0;
}

/*
 * Parse the v2.1 header additions: the three save/restore lists (CNTL, GPM,
 * SRM).  Pointers are set into the firmware blob itself; for PSP loading
 * each non-empty list is registered as its own ucode entry.
 */
static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}
	}
}

/*
 * Parse the v2.2 header additions: RLC IRAM and DRAM ucode images.
 * For PSP loading each non-empty image is registered as a ucode entry.
 */
static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

/*
 * Parse the v2.3 header additions: RLCP and RLCV ucode images plus their
 * versions.  For PSP loading each non-empty image is registered.
 */
static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
	adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);

	adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
	adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

/*
 * Parse the v2.4 header additions: global and per-SE (0-3) tap delay ucode
 * images.  For PSP loading each non-empty image is registered.
 */
static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_4 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

/*
 * amdgpu_gfx_rlc_init_microcode - parse an already-loaded RLC firmware blob
 * according to its header version.  Minor versions 1 and 2 are cumulative
 * extensions of 2.0; 2.3 and 2.4 are matched exactly (note the '==' below),
 * since each defines its own additional payload layout.
 * Returns 0 on success or a negative error code.
 */
int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
				  uint16_t version_major,
				  uint16_t version_minor)
{
	int err;

	if (version_major < 2) {
		/* only support rlc_hdr v2.x and onwards */
		dev_err(adev->dev, "unsupported rlc fw hdr\n");
		return -EINVAL;
	}

	/* is_rlc_v2_1 is still used in APU code path */
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	/* always true for an unsigned minor; kept for symmetry with the
	 * version checks below */
	if (version_minor >= 0) {
		err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
		if (err) {
			dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
			return err;
		}
	}
	if (version_minor >= 1)
		amdgpu_gfx_rlc_init_microcode_v2_1(adev);
	if (version_minor >= 2)
		amdgpu_gfx_rlc_init_microcode_v2_2(adev);
	if (version_minor == 3)
		amdgpu_gfx_rlc_init_microcode_v2_3(adev);
	if (version_minor == 4)
		amdgpu_gfx_rlc_init_microcode_v2_4(adev);

	return 0;
}