/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put the RLC into safe mode if the RLC is enabled and not already in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Take the RLC out of safe mode if the RLC is enabled and currently in safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!(adev->gfx.rlc.in_safe_mode))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}

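/*
 * Illustrative sketch (not part of the original file): IP-specific code
 * typically brackets clock-gating register updates with the two helpers
 * above, so the RLC does not touch those registers mid-update. The body
 * below is a placeholder, not a real programming sequence.
 */
static void __maybe_unused amdgpu_gfx_rlc_safe_mode_example(struct amdgpu_device *adev)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* ... update CGCG/MGCG related registers here ... */

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}
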
/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of the save restore block, in dwords
 *
 * Allocate and initialize the RLC save restore block.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}

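/*
 * Illustrative sketch (not part of the original file): a GFX IP module
 * points adev->gfx.rlc.reg_list at its register list before calling
 * amdgpu_gfx_rlc_init_sr(); the register offsets below are placeholders.
 */
static int __maybe_unused gfx_example_rlc_sr_init(struct amdgpu_device *adev)
{
	static const u32 example_reg_list[] = { 0x0, 0x4, 0x8 };

	adev->gfx.rlc.reg_list = example_reg_list;
	adev->gfx.rlc.reg_list_size = ARRAY_SIZE(example_reg_list);

	/* dws must cover at least reg_list_size dwords */
	return amdgpu_gfx_rlc_init_sr(adev, adev->gfx.rlc.reg_list_size);
}
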
/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the RLC clear state block.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->gfx.rlc.clear_state_obj,
				    &adev->gfx.rlc.clear_state_gpu_addr,
				    (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	return 0;
}

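/*
 * Illustrative sketch (not part of the original file): because
 * amdgpu_bo_create_kernel() keeps the CSB pinned and kmapped, IP code can
 * (re)fill it later, e.g. on resume, through the get_csb_buffer() callback.
 */
static void __maybe_unused gfx_example_fill_csb(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.cs_ptr)
		adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
}
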
/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the cp table BO and fill it with the CP firmware jump tables.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}

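/*
 * Illustrative sketch (not part of the original file): callers set
 * adev->gfx.rlc.cp_table_size before calling amdgpu_gfx_rlc_init_cpt();
 * the 4096-bytes-per-table sizing here is a placeholder assumption.
 */
static int __maybe_unused gfx_example_rlc_cpt_init(struct amdgpu_device *adev)
{
	u32 tables = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	adev->gfx.rlc.cp_table_size = ALIGN(tables * 4096, PAGE_SIZE);
	return amdgpu_gfx_rlc_init_cpt(adev);
}
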
/**
 * amdgpu_gfx_rlc_setup_cp_table - setup the buffer of cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Write the CP firmware jump tables into the cp table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

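/*
 * Illustrative sketch (not part of the original file): the jump tables are
 * packed back to back in the cp table BO, so each firmware's share of the
 * buffer is its jt_size (in dwords) taken from the gfx firmware header.
 */
static u32 __maybe_unused gfx_example_jt_size_dw(const struct firmware *fw)
{
	const struct gfx_firmware_header_v1_0 *hdr =
		(const struct gfx_firmware_header_v1_0 *)fw->data;

	return le32_to_cpu(hdr->jt_size);
}
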
/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the rlc_save_restore_block,
 * rlc_clear_state_block and rlc_jump_table_block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
{
	const struct common_firmware_header *common_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	struct amdgpu_firmware_info *info;
	unsigned int *tmp;
	unsigned int i;

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
		return -ENOMEM;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			common_hdr = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
		}
	}

	return 0;
}

static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
	adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);

	adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
	adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_4 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
				  uint16_t version_major,
				  uint16_t version_minor)
{
	int err;

	if (version_major < 2) {
		/* only support rlc_hdr v2.x and onwards */
		dev_err(adev->dev, "unsupported rlc fw hdr\n");
		return -EINVAL;
	}

	/* is_rlc_v2_1 is still used in APU code path */
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	/* the v2.0 fields are common to every v2.x header, so parse them first */
	err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
	if (err) {
		dev_err(adev->dev, "failed to init rlc v2_0 microcode\n");
		return err;
	}

	if (version_minor >= 1)
		amdgpu_gfx_rlc_init_microcode_v2_1(adev);
	if (version_minor >= 2)
		amdgpu_gfx_rlc_init_microcode_v2_2(adev);
	if (version_minor == 3)
		amdgpu_gfx_rlc_init_microcode_v2_3(adev);
	if (version_minor == 4)
		amdgpu_gfx_rlc_init_microcode_v2_4(adev);

	return 0;
}

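/*
 * Illustrative sketch (not part of the original file): an IP-specific
 * loader reads the major/minor header version out of the RLC firmware it
 * has already requested, then hands off to amdgpu_gfx_rlc_init_microcode().
 */
static int __maybe_unused gfx_example_init_rlc_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major, version_minor;

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);

	return amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
}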