/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);

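/* Hand the compute queues in queue_mask over to KIQ management via a
 * SET_RESOURCES packet.
 */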
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask:0 queue_type:0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

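/* Emit a MAP_QUEUES packet that attaches a compute ring's MQD and wptr
 * address to the HW queue slot selected by me/pipe/queue and doorbell.
 */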
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

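/* Emit an UNMAP_QUEUES packet; for PREEMPT_QUEUES_NO_UNMAP the trailing
 * dwords carry the fence address/seq used to signal preemption completion.
 */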
static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);
		if (dev_inst >= 2)
			WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4);
	}
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

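/* Basic ring test: write a magic value to SCRATCH_REG0 through the ring,
 * then poll the register until the value reads back or the timeout hits.
 */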
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

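/* Basic IB test: submit an indirect buffer that writes a magic value to a
 * writeback slot, wait on the fence, then verify the memory contents.
 */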
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	amdgpu_gfx_off_ctrl(adev, false);
	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	amdgpu_gfx_off_ctrl(adev, true);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	char fw_name[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
					  const char *chip_name)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int r;

	chip_name = "gc_9_4_3";

	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

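/* Allocate the HPD EOP buffer covering the compute rings of every XCC and
 * stage the MEC microcode in a GTT buffer for later loading.
 */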
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

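/* Program GRBM_GFX_INDEX on one XCC to target a specific SE/SH/instance;
 * 0xffffffff in a field selects broadcast writes for that field.
 */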
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

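/* Regroup the XCCs into partitions of num_xccs_per_xcp by reprogramming
 * CP_HYP_XCP_CTL on every XCC.
 */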
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
						int num_xccs_per_xcp)
{
	int i, num_xcc;
	u32 tmp = 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	for (i = 0; i < num_xcc; i++) {
		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
				    num_xccs_per_xcp);
		tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
				    i % num_xccs_per_xcp);
		WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp);
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return 0;
}

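/* Map an IH node id to a logical XCC index by counting the xcc_mask bits
 * at or below it; returns -EINVAL when no XCC matches.
 */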
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 3):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
				     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
			ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

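/* sw_init: register the CP interrupt sources, create the RLC and MEC BOs,
 * then set up compute rings, KIQ and MQD backing store for every XCC.
 */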
static int gfx_v9_4_3_sw_init(void *handle)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[xcc_id];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
				sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_sysfs_init(adev);
	if (r)
		return r;

	return amdgpu_gfx_ras_sw_init(adev);
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
	int i, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	amdgpu_gfx_sysfs_fini(adev);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
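/* Point the KFD compute VMIDs' apertures at the default SH_MEM_BASES
 * window and clear their GDS/GWS/OA allocations.
 */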
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is available since v2_1,
	 * and it is needed by the gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev,
					  int xcc_id)
{
	uint32_t tmp = 0;
	int num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	switch (num_xcc) {
	/* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */
	case 1:
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8);
		break;
	case 2:
		tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID);
		tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP));
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp);

		break;
	default:
		break;
	}
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

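/* Request RLC safe mode on one XCC and spin until the RLC acknowledges
 * the request (CMD field clears) or the usec timeout expires.
 */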
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* APUs such as carrizo enable the cp interrupt after cp is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

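/* Legacy (non-PSP) RLC microcode load: stream the ucode dwords into
 * RLC_GPM_UCODE_DATA and finish by writing the fw version to the address
 * register.
 */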
static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
			RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
				       unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};

static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
			       adev->ip_map.logical_to_dev_inst(
				       adev, entry->hwip, entry->instance) :
			       entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
		      entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					(void *)rlcg_access_gc_9_4_3,
					ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
					     bool enable, int xcc_id)
{
	if (enable) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
						    int xcc_id)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 mec_ucode_addr_offset;
	u32 mec_ucode_data_offset;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	mec_ucode_addr_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
	mec_ucode_data_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);

	/* MEC1 */
	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32(mec_ucode_data_offset,
		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* Todo: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
			mqd->cp_hqd_queue_priority =
				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
		}
	}
}

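/* Fill a v9 MQD from scratch for the given ring: EOP buffer, doorbell,
 * PQ base and size, read/write pointer state and queue priority.
 */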
static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	mqd->dynamic_cu_mask_addr_lo =
		lower_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi =
		upper_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a queue/ring */
	gfx_v9_4_3_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);

	/* the map_queues packet doesn't need to activate the queue,
	 * so only the kiq needs to set this field.
	 */
1564 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1565 		mqd->cp_hqd_active = 1;
1566 
1567 	return 0;
1568 }
1569 
1570 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1571 					    int xcc_id)
1572 {
1573 	struct amdgpu_device *adev = ring->adev;
1574 	struct v9_mqd *mqd = ring->mqd_ptr;
1575 	int j;
1576 
1577 	/* disable wptr polling */
1578 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1579 
1580 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1581 	       mqd->cp_hqd_eop_base_addr_lo);
1582 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1583 	       mqd->cp_hqd_eop_base_addr_hi);
1584 
1585 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1586 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1587 	       mqd->cp_hqd_eop_control);
1588 
1589 	/* enable doorbell? */
1590 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1591 	       mqd->cp_hqd_pq_doorbell_control);
1592 
1593 	/* disable the queue if it's active */
1594 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1595 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1596 		for (j = 0; j < adev->usec_timeout; j++) {
1597 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1598 				break;
1599 			udelay(1);
1600 		}
1601 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1602 		       mqd->cp_hqd_dequeue_request);
1603 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1604 		       mqd->cp_hqd_pq_rptr);
1605 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1606 		       mqd->cp_hqd_pq_wptr_lo);
1607 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1608 		       mqd->cp_hqd_pq_wptr_hi);
1609 	}
1610 
1611 	/* set the pointer to the MQD */
1612 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1613 	       mqd->cp_mqd_base_addr_lo);
1614 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1615 	       mqd->cp_mqd_base_addr_hi);
1616 
1617 	/* set MQD vmid to 0 */
1618 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1619 	       mqd->cp_mqd_control);
1620 
1621 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1622 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1623 	       mqd->cp_hqd_pq_base_lo);
1624 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1625 	       mqd->cp_hqd_pq_base_hi);
1626 
1627 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1628 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1629 	       mqd->cp_hqd_pq_control);
1630 
1631 	/* set the wb address whether it's enabled or not */
1632 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1633 				mqd->cp_hqd_pq_rptr_report_addr_lo);
1634 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1635 				mqd->cp_hqd_pq_rptr_report_addr_hi);
1636 
1637 	/* only used if CP_PQ_WPTR_POLL_CNTL.EN == 1 */
1638 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1639 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
1640 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1641 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
1642 
1643 	/* enable the doorbell if requested */
1644 	if (ring->use_doorbell) {
1645 		WREG32_SOC15(
1646 			GC, GET_INST(GC, xcc_id),
1647 			regCP_MEC_DOORBELL_RANGE_LOWER,
1648 			((adev->doorbell_index.kiq +
1649 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1650 			 2) << 2);
1651 		WREG32_SOC15(
1652 			GC, GET_INST(GC, xcc_id),
1653 			regCP_MEC_DOORBELL_RANGE_UPPER,
1654 			((adev->doorbell_index.userqueue_end +
1655 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1656 			 2) << 2);
1657 	}
1658 
1659 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1660 	       mqd->cp_hqd_pq_doorbell_control);
1661 
1662 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1663 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1664 	       mqd->cp_hqd_pq_wptr_lo);
1665 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1666 	       mqd->cp_hqd_pq_wptr_hi);
1667 
1668 	/* set the vmid for the queue */
1669 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
1670 
1671 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
1672 	       mqd->cp_hqd_persistent_state);
1673 
1674 	/* activate the queue */
1675 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
1676 	       mqd->cp_hqd_active);
1677 
1678 	if (ring->use_doorbell)
1679 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
1680 
1681 	return 0;
1682 }
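
/*
 * Illustrative sketch (not part of the driver): the doorbell range writes
 * above turn a per-XCC doorbell slot index into a byte offset.  Each
 * 64-bit doorbell occupies two dwords (the "* 2") and the "<< 2" converts
 * dwords to bytes.  The helper below restates that arithmetic; the values
 * in the comment are hypothetical.
 */
static inline u32 gfx_v9_4_3_doorbell_byte_offset(u32 index, u32 xcc_id,
						  u32 xcc_range)
{
	/* e.g. index 0x300, xcc_id 1, range 0x100 -> (0x400 * 2) << 2 = 0x2000 */
	return ((index + xcc_id * xcc_range) * 2) << 2;
}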
1683 
1684 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
1685 					    int xcc_id)
1686 {
1687 	struct amdgpu_device *adev = ring->adev;
1688 	int j;
1689 
1690 	/* disable the queue if it's active */
1691 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1692 
1693 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1694 
1695 		for (j = 0; j < adev->usec_timeout; j++) {
1696 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1697 				break;
1698 			udelay(1);
1699 		}
1700 
1701 		if (j == adev->usec_timeout) {
1702 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
1703 
1704 			/* Manual disable if dequeue request times out */
1705 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
1706 		}
1707 
1708 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1709 		      0);
1710 	}
1711 
1712 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
1713 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
1714 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
1715 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
1716 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
1717 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
1718 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
1719 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
1720 
1721 	return 0;
1722 }
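
/*
 * Illustrative sketch (not part of the driver): the KIQ init and queue
 * fini paths above duplicate the same CP_HQD_ACTIVE polling loop after
 * raising a dequeue request.  A shared helper could look like this; it
 * assumes the caller holds srbm_mutex and has already selected the queue
 * with soc15_grbm_select().
 */
static int __maybe_unused
gfx_v9_4_3_xcc_wait_hqd_inactive(struct amdgpu_device *adev, int xcc_id)
{
	int j;

	for (j = 0; j < adev->usec_timeout; j++) {
		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				   regCP_HQD_ACTIVE) & 1))
			return 0;	/* queue has drained */
		udelay(1);
	}

	return -ETIMEDOUT;	/* still active after the timeout */
}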
1723 
1724 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1725 {
1726 	struct amdgpu_device *adev = ring->adev;
1727 	struct v9_mqd *mqd = ring->mqd_ptr;
1728 	struct v9_mqd *tmp_mqd;
1729 
1730 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
1731 
1732 	/* The GPU could be in a bad state during probe and the driver may
1733 	 * trigger a reset after loading the SMU; in that case the MQD has
1734 	 * not been initialized and the driver needs to re-init it.
1735 	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
1736 	 */
1737 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
1738 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
1739 		/* for the GPU_RESET case, reset the MQD to a clean state */
1740 		if (adev->gfx.kiq[xcc_id].mqd_backup)
1741 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
1742 
1743 		/* reset ring buffer */
1744 		ring->wptr = 0;
1745 		amdgpu_ring_clear_ring(ring);
1746 		mutex_lock(&adev->srbm_mutex);
1747 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1748 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1749 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1750 		mutex_unlock(&adev->srbm_mutex);
1751 	} else {
1752 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1753 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1754 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1755 		mutex_lock(&adev->srbm_mutex);
1756 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1757 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1758 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1759 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1760 		mutex_unlock(&adev->srbm_mutex);
1761 
1762 		if (adev->gfx.kiq[xcc_id].mqd_backup)
1763 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1770 {
1771 	struct amdgpu_device *adev = ring->adev;
1772 	struct v9_mqd *mqd = ring->mqd_ptr;
1773 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
1774 	struct v9_mqd *tmp_mqd;
1775 
1776 	/* Same as the KIQ init above: the driver needs to re-init the MQD if
1777 	 * mqd->cp_hqd_pq_control was not initialized before.
1778 	 */
1779 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
1780 
1781 	if (!tmp_mqd->cp_hqd_pq_control ||
1782 	    (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
1783 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1784 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1785 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1786 		mutex_lock(&adev->srbm_mutex);
1787 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1788 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1789 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1790 		mutex_unlock(&adev->srbm_mutex);
1791 
1792 		if (adev->gfx.mec.mqd_backup[mqd_idx])
1793 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
1794 	} else {
1795 		/* restore the MQD to a clean state */
1796 		if (adev->gfx.mec.mqd_backup[mqd_idx])
1797 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
1798 		/* reset ring buffer */
1799 		ring->wptr = 0;
1800 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
1801 		amdgpu_ring_clear_ring(ring);
1802 	}
1803 
1804 	return 0;
1805 }
1806 
1807 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
1808 {
1809 	struct amdgpu_ring *ring;
1810 	int j;
1811 
1812 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1813 		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
1814 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1815 			mutex_lock(&adev->srbm_mutex);
1816 			soc15_grbm_select(adev, ring->me,
1817 					ring->pipe,
1818 					ring->queue, 0, GET_INST(GC, xcc_id));
1819 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
1820 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1821 			mutex_unlock(&adev->srbm_mutex);
1822 		}
1823 	}
1824 
1825 	return 0;
1826 }
1827 
1828 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
1829 {
1830 	struct amdgpu_ring *ring;
1831 	int r;
1832 
1833 	ring = &adev->gfx.kiq[xcc_id].ring;
1834 
1835 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
1836 	if (unlikely(r != 0))
1837 		return r;
1838 
1839 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1840 	if (unlikely(r != 0)) {
1841 		amdgpu_bo_unreserve(ring->mqd_obj);
1842 		return r;
1843 	}
1844 
1845 	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
1846 	amdgpu_bo_kunmap(ring->mqd_obj);
1847 	ring->mqd_ptr = NULL;
1848 	amdgpu_bo_unreserve(ring->mqd_obj);
1849 	ring->sched.ready = true;
1850 	return 0;
1851 }
1852 
1853 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
1854 {
1855 	struct amdgpu_ring *ring = NULL;
1856 	int r = 0, i;
1857 
1858 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
1859 
1860 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1861 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1862 
1863 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
1864 		if (unlikely(r != 0))
1865 			goto done;
1866 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1867 		if (!r) {
1868 			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
1869 			amdgpu_bo_kunmap(ring->mqd_obj);
1870 			ring->mqd_ptr = NULL;
1871 		}
1872 		amdgpu_bo_unreserve(ring->mqd_obj);
1873 		if (r)
1874 			goto done;
1875 	}
1876 
1877 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
1878 done:
1879 	return r;
1880 }
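
/*
 * Illustrative sketch (not part of the driver): the KIQ and KCQ resume
 * paths above both wrap queue init in the same reserve/kmap/kunmap/
 * unreserve sequence on the MQD buffer object.  A generic wrapper could
 * factor that out; the callback type here is hypothetical.
 */
static int __maybe_unused
gfx_v9_4_3_with_mqd_mapped(struct amdgpu_ring *ring, int xcc_id,
			   int (*init)(struct amdgpu_ring *ring, int xcc_id))
{
	int r;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = init(ring, xcc_id);	/* e.g. gfx_v9_4_3_xcc_kcq_init_queue */
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);

	return r;
}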
1881 
1882 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
1883 {
1884 	struct amdgpu_ring *ring;
1885 	int r, j;
1886 
1887 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1888 
1889 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1890 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
1891 
1892 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
1893 		if (r)
1894 			return r;
1895 	}
1896 
1897 	/* set the virtual and physical id based on partition_mode */
1898 	gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
1899 
1900 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
1901 	if (r)
1902 		return r;
1903 
1904 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
1905 	if (r)
1906 		return r;
1907 
1908 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1909 		ring = &adev->gfx.compute_ring
1910 				[j + xcc_id * adev->gfx.num_compute_rings];
1911 		r = amdgpu_ring_test_helper(ring);
1912 		if (r)
1913 			return r;
1914 	}
1915 
1916 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1917 
1918 	return 0;
1919 }
1920 
1921 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
1922 {
1923 	int r = 0, i, num_xcc;
1924 
1925 	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1926 					    AMDGPU_XCP_FL_NONE) ==
1927 	    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
1928 		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
1929 						     amdgpu_user_partt_mode);
1930 
1931 	if (r)
1932 		return r;
1933 
1934 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1935 	for (i = 0; i < num_xcc; i++) {
1936 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
1937 		if (r)
1938 			return r;
1939 	}
1940 
1941 	return 0;
1942 }
1943 
1944 static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
1945 				     int xcc_id)
1946 {
1947 	gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
1948 }
1949 
1950 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
1951 {
1952 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
1953 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
1954 
1955 	/* Use the deinitialize sequence from CAIL when unbinding the device
1956 	 * from the driver, otherwise the KIQ hangs when binding it back.
1957 	 */
1958 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1959 		mutex_lock(&adev->srbm_mutex);
1960 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
1961 				  adev->gfx.kiq[xcc_id].ring.pipe,
1962 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
1963 				  GET_INST(GC, xcc_id));
1964 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
1965 						 xcc_id);
1966 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1967 		mutex_unlock(&adev->srbm_mutex);
1968 	}
1969 
1970 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
1971 	gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
1972 }
1973 
1974 static int gfx_v9_4_3_hw_init(void *handle)
1975 {
1976 	int r;
1977 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1978 
1979 	gfx_v9_4_3_init_golden_registers(adev);
1980 
1981 	gfx_v9_4_3_constants_init(adev);
1982 
1983 	r = adev->gfx.rlc.funcs->resume(adev);
1984 	if (r)
1985 		return r;
1986 
1987 	r = gfx_v9_4_3_cp_resume(adev);
1988 	if (r)
1989 		return r;
1990 
1991 	return r;
1992 }
1993 
1994 static int gfx_v9_4_3_hw_fini(void *handle)
1995 {
1996 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1997 	int i, num_xcc;
1998 
1999 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2000 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2001 
2002 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2003 	for (i = 0; i < num_xcc; i++)
2004 		gfx_v9_4_3_xcc_fini(adev, i);
2006 
2007 	return 0;
2008 }
2009 
2010 static int gfx_v9_4_3_suspend(void *handle)
2011 {
2012 	return gfx_v9_4_3_hw_fini(handle);
2013 }
2014 
2015 static int gfx_v9_4_3_resume(void *handle)
2016 {
2017 	return gfx_v9_4_3_hw_init(handle);
2018 }
2019 
2020 static bool gfx_v9_4_3_is_idle(void *handle)
2021 {
2022 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2023 	int i, num_xcc;
2024 
2025 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2026 	for (i = 0; i < num_xcc; i++) {
2027 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2028 					GRBM_STATUS, GUI_ACTIVE))
2029 			return false;
2030 	}
2031 	return true;
2032 }
2033 
2034 static int gfx_v9_4_3_wait_for_idle(void *handle)
2035 {
2036 	unsigned i;
2037 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2038 
2039 	for (i = 0; i < adev->usec_timeout; i++) {
2040 		if (gfx_v9_4_3_is_idle(handle))
2041 			return 0;
2042 		udelay(1);
2043 	}
2044 	return -ETIMEDOUT;
2045 }
2046 
2047 static int gfx_v9_4_3_soft_reset(void *handle)
2048 {
2049 	u32 grbm_soft_reset = 0;
2050 	u32 tmp;
2051 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2052 
2053 	/* GRBM_STATUS */
2054 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2055 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2056 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2057 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2058 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2059 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2060 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2061 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2062 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2063 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2064 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2065 	}
2066 
2067 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2068 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2069 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2070 	}
2071 
2072 	/* GRBM_STATUS2 */
2073 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2074 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2075 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2076 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2077 
2079 	if (grbm_soft_reset) {
2080 		/* stop the rlc */
2081 		adev->gfx.rlc.funcs->stop(adev);
2082 
2083 		/* Disable MEC parsing/prefetching */
2084 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2085 
2087 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2088 		tmp |= grbm_soft_reset;
2089 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2090 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2091 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2092 
2093 		udelay(50);
2094 
2095 		tmp &= ~grbm_soft_reset;
2096 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2097 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2099 
2100 		/* Wait a little for things to settle down */
2101 		udelay(50);
2102 	}
2103 	return 0;
2104 }
2105 
2106 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2107 					  uint32_t vmid,
2108 					  uint32_t gds_base, uint32_t gds_size,
2109 					  uint32_t gws_base, uint32_t gws_size,
2110 					  uint32_t oa_base, uint32_t oa_size)
2111 {
2112 	struct amdgpu_device *adev = ring->adev;
2113 
2114 	/* GDS Base */
2115 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2116 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2117 				   gds_base);
2118 
2119 	/* GDS Size */
2120 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2121 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2122 				   gds_size);
2123 
2124 	/* GWS */
2125 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2126 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2127 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2128 
2129 	/* OA */
2130 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2131 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2132 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2133 }
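
/*
 * Illustrative sketch (not part of the driver): the OA mask written
 * above, (1 << (oa_size + oa_base)) - (1 << oa_base), sets oa_size
 * consecutive bits starting at bit oa_base.  The operand values below
 * are made up purely to show the arithmetic.
 */
static inline u32 gfx_v9_4_3_oa_mask_example(void)
{
	u32 oa_base = 4, oa_size = 2;

	/* (1 << 6) - (1 << 4) = 0x40 - 0x10 = 0x30, i.e. bits 4 and 5 set */
	return (1 << (oa_size + oa_base)) - (1 << oa_base);
}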
2134 
2135 static int gfx_v9_4_3_early_init(void *handle)
2136 {
2137 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2138 	int num_xcc;
2139 
2140 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2141 
2142 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2143 					  AMDGPU_MAX_COMPUTE_RINGS);
2144 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2145 	gfx_v9_4_3_set_ring_funcs(adev);
2146 	gfx_v9_4_3_set_irq_funcs(adev);
2147 	gfx_v9_4_3_set_gds_init(adev);
2148 	gfx_v9_4_3_set_rlc_funcs(adev);
2149 
2150 	return gfx_v9_4_3_init_microcode(adev);
2151 }
2152 
2153 static int gfx_v9_4_3_late_init(void *handle)
2154 {
2155 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2156 	int r;
2157 
2158 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2159 	if (r)
2160 		return r;
2161 
2162 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2163 	if (r)
2164 		return r;
2165 
2166 	return 0;
2167 }
2168 
2169 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2170 					    bool enable, int xcc_id)
2171 {
2172 	uint32_t def, data;
2173 
2174 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2175 		return;
2176 
2177 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2178 				  regRLC_CGTT_MGCG_OVERRIDE);
2179 
2180 	if (enable)
2181 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2182 	else
2183 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2184 
2185 	if (def != data)
2186 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2187 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2188 
2189 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
2190 
2191 	if (enable)
2192 		data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
2193 	else
2194 		data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
2195 
2196 	if (def != data)
2197 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
2198 }
2199 
2200 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2201 						bool enable, int xcc_id)
2202 {
2203 	uint32_t def, data;
2204 
2205 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2206 		return;
2207 
2208 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2209 				  regRLC_CGTT_MGCG_OVERRIDE);
2210 
2211 	if (enable)
2212 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2213 	else
2214 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2215 
2216 	if (def != data)
2217 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2218 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2219 }
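
/*
 * Illustrative sketch (not part of the driver): both FGCG update
 * functions above follow the same read/modify/write-only-if-changed
 * pattern on RLC_CGTT_MGCG_OVERRIDE.  A shared helper taking the
 * override mask could factor out the duplication.
 */
static void __maybe_unused
gfx_v9_4_3_xcc_update_cg_override(struct amdgpu_device *adev, bool enable,
				  int xcc_id, uint32_t mask)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				  regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~mask;	/* clear the override so gating takes effect */
	else
		data |= mask;

	if (def != data)	/* skip the register write when nothing changed */
		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
			     regRLC_CGTT_MGCG_OVERRIDE, data);
}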
2220 
2221 static void
2222 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2223 						bool enable, int xcc_id)
2224 {
2225 	uint32_t data, def;
2226 
2227 	/* It is disabled by HW by default */
2228 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2229 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2230 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2231 
2232 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2233 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2234 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2235 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2236 
2237 		if (def != data)
2238 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2239 
2240 		/* MGLS is a global flag to control all MGLS in GFX */
2241 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2242 			/* 2 - RLC memory Light sleep */
2243 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2244 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2245 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2246 				if (def != data)
2247 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2248 			}
2249 			/* 3 - CP memory Light sleep */
2250 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2251 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2252 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2253 				if (def != data)
2254 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2255 			}
2256 		}
2257 	} else {
2258 		/* 1 - MGCG_OVERRIDE */
2259 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2260 
2261 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2262 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2263 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2264 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2265 
2266 		if (def != data)
2267 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2268 
2269 		/* 2 - disable MGLS in RLC */
2270 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2271 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2272 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2273 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2274 		}
2275 
2276 		/* 3 - disable MGLS in CP */
2277 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2278 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2279 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2280 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2281 		}
2282 	}
2284 }
2285 
2286 static void
2287 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2288 						bool enable, int xcc_id)
2289 {
2290 	uint32_t def, data;
2291 
2292 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2293 
2294 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2295 		/* unset CGCG override */
2296 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2297 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2298 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2299 		else
2300 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2301 		/* update CGCG and CGLS override bits */
2302 		if (def != data)
2303 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2304 
2305 		/* enable cgcg FSM(0x0000363F) */
2306 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2307 
2308 		data = (0x36
2309 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2310 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2311 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2312 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2313 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2314 		if (def != data)
2315 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2316 
2317 		/* set IDLE_POLL_COUNT(0x00900100) */
2318 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2319 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2320 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2321 		if (def != data)
2322 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2323 	} else {
2324 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2325 		/* reset CGCG/CGLS bits */
2326 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2327 		/* disable cgcg and cgls in FSM */
2328 		if (def != data)
2329 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2330 	}
2332 }
2333 
2334 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2335 						  bool enable, int xcc_id)
2336 {
2337 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2338 
2339 	if (enable) {
2340 		/* FGCG */
2341 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2342 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2343 
2344 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2345 		 * ===  MGCG + MGLS ===
2346 		 */
2347 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2348 								xcc_id);
2349 		/* ===  CGCG + CGLS === */
2350 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2351 								xcc_id);
2352 	} else {
2353 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2354 		 * ===  CGCG + CGLS ===
2355 		 */
2356 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2357 								xcc_id);
2358 		/* ===  MGCG + MGLS === */
2359 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2360 								xcc_id);
2361 
2362 		/* FGCG */
2363 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2364 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2365 	}
2366 
2367 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2368 
2369 	return 0;
2370 }
2371 
2372 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2373 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2374 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2375 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2376 	.init = gfx_v9_4_3_rlc_init,
2377 	.resume = gfx_v9_4_3_rlc_resume,
2378 	.stop = gfx_v9_4_3_rlc_stop,
2379 	.reset = gfx_v9_4_3_rlc_reset,
2380 	.start = gfx_v9_4_3_rlc_start,
2381 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2382 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2383 };
2384 
2385 static int gfx_v9_4_3_set_powergating_state(void *handle,
2386 					  enum amd_powergating_state state)
2387 {
2388 	return 0;
2389 }
2390 
2391 static int gfx_v9_4_3_set_clockgating_state(void *handle,
2392 					  enum amd_clockgating_state state)
2393 {
2394 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2395 	int i, num_xcc;
2396 
2397 	if (amdgpu_sriov_vf(adev))
2398 		return 0;
2399 
2400 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2401 	switch (adev->ip_versions[GC_HWIP][0]) {
2402 	case IP_VERSION(9, 4, 3):
2403 		for (i = 0; i < num_xcc; i++)
2404 			gfx_v9_4_3_xcc_update_gfx_clock_gating(
2405 				adev, state == AMD_CG_STATE_GATE, i);
2406 		break;
2407 	default:
2408 		break;
2409 	}
2410 	return 0;
2411 }
2412 
2413 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2414 {
2415 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2416 	int data;
2417 
2418 	if (amdgpu_sriov_vf(adev))
2419 		*flags = 0;
2420 
2421 	/* AMD_CG_SUPPORT_GFX_MGCG */
2422 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2423 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2424 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2425 
2426 	/* AMD_CG_SUPPORT_GFX_CGCG */
2427 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2428 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2429 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2430 
2431 	/* AMD_CG_SUPPORT_GFX_CGLS */
2432 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2433 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2434 
2435 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2436 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2437 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2438 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2439 
2440 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2441 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2442 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2443 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2444 }
2445 
2446 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2447 {
2448 	struct amdgpu_device *adev = ring->adev;
2449 	u32 ref_and_mask, reg_mem_engine;
2450 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2451 
2452 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2453 		switch (ring->me) {
2454 		case 1:
2455 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2456 			break;
2457 		case 2:
2458 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2459 			break;
2460 		default:
2461 			return;
2462 		}
2463 		reg_mem_engine = 0;
2464 	} else {
2465 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2466 		reg_mem_engine = 1; /* pfp */
2467 	}
2468 
2469 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2470 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2471 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2472 			      ref_and_mask, ref_and_mask, 0x20);
2473 }
2474 
2475 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2476 					  struct amdgpu_job *job,
2477 					  struct amdgpu_ib *ib,
2478 					  uint32_t flags)
2479 {
2480 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2481 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2482 
2483 	/* Currently, there is a high probability of a wave ID mismatch
2484 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2485 	 * different wave IDs than the GDS expects. This situation happens
2486 	 * randomly when at least 5 compute pipes use GDS ordered append.
2487 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2488 	 * Those are probably bugs somewhere else in the kernel driver.
2489 	 *
2490 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2491 	 * GDS to 0 for this ring (me/pipe).
2492 	 */
2493 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2494 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2495 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2496 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2497 	}
2498 
2499 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2500 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2501 	amdgpu_ring_write(ring,
2502 #ifdef __BIG_ENDIAN
2503 				(2 << 0) |
2504 #endif
2505 				lower_32_bits(ib->gpu_addr));
2506 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2507 	amdgpu_ring_write(ring, control);
2508 }
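
/*
 * Illustrative sketch (not part of the driver): the control dword built
 * above packs the IB length in dwords into the low bits and the VMID
 * into bits 24-27, together with INDIRECT_BUFFER_VALID.  The operand
 * values below are hypothetical and only demonstrate the packing.
 */
static inline u32 gfx_v9_4_3_ib_control_example(void)
{
	u32 length_dw = 0x100, vmid = 3;

	/* INDIRECT_BUFFER_VALID | 0x100 | (3 << 24) */
	return INDIRECT_BUFFER_VALID | length_dw | (vmid << 24);
}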
2509 
2510 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2511 				     u64 seq, unsigned flags)
2512 {
2513 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2514 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2515 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2516 
2517 	/* RELEASE_MEM - flush caches, send int */
2518 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2519 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2520 					       EOP_TC_NC_ACTION_EN) :
2521 					      (EOP_TCL1_ACTION_EN |
2522 					       EOP_TC_ACTION_EN |
2523 					       EOP_TC_WB_ACTION_EN |
2524 					       EOP_TC_MD_ACTION_EN)) |
2525 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2526 				 EVENT_INDEX(5)));
2527 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2528 
2529 	/*
2530 	 * the address should be Qword aligned for a 64-bit write, and Dword
2531 	 * aligned if only the low 32 bits are sent (data high is discarded)
2532 	 */
2533 	if (write64bit)
2534 		BUG_ON(addr & 0x7);
2535 	else
2536 		BUG_ON(addr & 0x3);
2537 	amdgpu_ring_write(ring, lower_32_bits(addr));
2538 	amdgpu_ring_write(ring, upper_32_bits(addr));
2539 	amdgpu_ring_write(ring, lower_32_bits(seq));
2540 	amdgpu_ring_write(ring, upper_32_bits(seq));
2541 	amdgpu_ring_write(ring, 0);
2542 }
2543 
2544 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2545 {
2546 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2547 	uint32_t seq = ring->fence_drv.sync_seq;
2548 	uint64_t addr = ring->fence_drv.gpu_addr;
2549 
2550 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2551 			      lower_32_bits(addr), upper_32_bits(addr),
2552 			      seq, 0xffffffff, 4);
2553 }
2554 
2555 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2556 					unsigned vmid, uint64_t pd_addr)
2557 {
2558 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2559 }
2560 
2561 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2562 {
2563 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2564 }
2565 
2566 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2567 {
2568 	u64 wptr;
2569 
2570 	/* XXX check if swapping is necessary on BE */
2571 	if (ring->use_doorbell)
2572 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2573 	else
2574 		BUG();
2575 	return wptr;
2576 }
2577 
2578 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2579 {
2580 	struct amdgpu_device *adev = ring->adev;
2581 
2582 	/* XXX check if swapping is necessary on BE */
2583 	if (ring->use_doorbell) {
2584 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2585 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2586 	} else {
2587 		BUG(); /* only DOORBELL method supported on gfx9 now */
2588 	}
2589 }
2590 
2591 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2592 					 u64 seq, unsigned int flags)
2593 {
2594 	struct amdgpu_device *adev = ring->adev;
2595 
2596 	/* we only allocate 32 bits for each seq wb address */
2597 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2598 
2599 	/* write fence seq to the "addr" */
2600 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2601 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2602 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2603 	amdgpu_ring_write(ring, lower_32_bits(addr));
2604 	amdgpu_ring_write(ring, upper_32_bits(addr));
2605 	amdgpu_ring_write(ring, lower_32_bits(seq));
2606 
2607 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2608 		/* set register to trigger INT */
2609 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2610 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2611 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2612 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2613 		amdgpu_ring_write(ring, 0);
2614 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2615 	}
2616 }
2617 
2618 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2619 				    uint32_t reg_val_offs)
2620 {
2621 	struct amdgpu_device *adev = ring->adev;
2622 
2623 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2624 	amdgpu_ring_write(ring, 0 |	/* src: register */
2625 				(5 << 8) |	/* dst: memory */
2626 				(1 << 20));	/* write confirm */
2627 	amdgpu_ring_write(ring, reg);
2628 	amdgpu_ring_write(ring, 0);
2629 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2630 				reg_val_offs * 4));
2631 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2632 				reg_val_offs * 4));
2633 }
2634 
2635 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2636 				    uint32_t val)
2637 {
2638 	uint32_t cmd = 0;
2639 
2640 	switch (ring->funcs->type) {
2641 	case AMDGPU_RING_TYPE_GFX:
2642 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
2643 		break;
2644 	case AMDGPU_RING_TYPE_KIQ:
2645 		cmd = (1 << 16); /* no inc addr */
2646 		break;
2647 	default:
2648 		cmd = WR_CONFIRM;
2649 		break;
2650 	}
2651 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2652 	amdgpu_ring_write(ring, cmd);
2653 	amdgpu_ring_write(ring, reg);
2654 	amdgpu_ring_write(ring, 0);
2655 	amdgpu_ring_write(ring, val);
2656 }
2657 
2658 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
2659 					uint32_t val, uint32_t mask)
2660 {
2661 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2662 }
2663 
2664 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
2665 						  uint32_t reg0, uint32_t reg1,
2666 						  uint32_t ref, uint32_t mask)
2667 {
2668 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
2669 						   ref, mask);
2670 }
2671 
2672 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2673 	struct amdgpu_device *adev, int me, int pipe,
2674 	enum amdgpu_interrupt_state state, int xcc_id)
2675 {
2676 	u32 mec_int_cntl, mec_int_cntl_reg;
2677 
2678 	/*
2679 	 * amdgpu controls only the first MEC. That's why this function only
2680 	 * handles the setting of interrupts for this specific MEC. All other
2681 	 * pipes' interrupts are set by amdkfd.
2682 	 */
2683 
2684 	if (me == 1) {
2685 		switch (pipe) {
2686 		case 0:
2687 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
2688 			break;
2689 		case 1:
2690 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
2691 			break;
2692 		case 2:
2693 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
2694 			break;
2695 		case 3:
2696 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
2697 			break;
2698 		default:
2699 			DRM_DEBUG("invalid pipe %d\n", pipe);
2700 			return;
2701 		}
2702 	} else {
2703 		DRM_DEBUG("invalid me %d\n", me);
2704 		return;
2705 	}
2706 
2707 	switch (state) {
2708 	case AMDGPU_IRQ_STATE_DISABLE:
2709 		mec_int_cntl = RREG32(mec_int_cntl_reg);
2710 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2711 					     TIME_STAMP_INT_ENABLE, 0);
2712 		WREG32(mec_int_cntl_reg, mec_int_cntl);
2713 		break;
2714 	case AMDGPU_IRQ_STATE_ENABLE:
2715 		mec_int_cntl = RREG32(mec_int_cntl_reg);
2716 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2717 					     TIME_STAMP_INT_ENABLE, 1);
2718 		WREG32(mec_int_cntl_reg, mec_int_cntl);
2719 		break;
2720 	default:
2721 		break;
2722 	}
2723 }
2724 
2725 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
2726 					     struct amdgpu_irq_src *source,
2727 					     unsigned type,
2728 					     enum amdgpu_interrupt_state state)
2729 {
2730 	int i, num_xcc;
2731 
2732 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2733 	switch (state) {
2734 	case AMDGPU_IRQ_STATE_DISABLE:
2735 	case AMDGPU_IRQ_STATE_ENABLE:
2736 		for (i = 0; i < num_xcc; i++)
2737 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2738 				PRIV_REG_INT_ENABLE,
2739 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2740 		break;
2741 	default:
2742 		break;
2743 	}
2744 
2745 	return 0;
2746 }
2747 
2748 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
2749 					      struct amdgpu_irq_src *source,
2750 					      unsigned type,
2751 					      enum amdgpu_interrupt_state state)
2752 {
2753 	int i, num_xcc;
2754 
2755 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2756 	switch (state) {
2757 	case AMDGPU_IRQ_STATE_DISABLE:
2758 	case AMDGPU_IRQ_STATE_ENABLE:
2759 		for (i = 0; i < num_xcc; i++)
2760 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2761 				PRIV_INSTR_INT_ENABLE,
2762 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2763 		break;
2764 	default:
2765 		break;
2766 	}
2767 
2768 	return 0;
2769 }
2770 
2771 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
2772 					    struct amdgpu_irq_src *src,
2773 					    unsigned type,
2774 					    enum amdgpu_interrupt_state state)
2775 {
2776 	int i, num_xcc;
2777 
2778 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2779 	for (i = 0; i < num_xcc; i++) {
2780 		switch (type) {
2781 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
2782 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2783 				adev, 1, 0, state, i);
2784 			break;
2785 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
2786 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2787 				adev, 1, 1, state, i);
2788 			break;
2789 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
2790 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2791 				adev, 1, 2, state, i);
2792 			break;
2793 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
2794 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2795 				adev, 1, 3, state, i);
2796 			break;
2797 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
2798 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2799 				adev, 2, 0, state, i);
2800 			break;
2801 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
2802 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2803 				adev, 2, 1, state, i);
2804 			break;
2805 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
2806 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2807 				adev, 2, 2, state, i);
2808 			break;
2809 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
2810 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2811 				adev, 2, 3, state, i);
2812 			break;
2813 		default:
2814 			break;
2815 		}
2816 	}
2817 
2818 	return 0;
2819 }
2820 
2821 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
2822 			    struct amdgpu_irq_src *source,
2823 			    struct amdgpu_iv_entry *entry)
2824 {
2825 	int i, xcc_id;
2826 	u8 me_id, pipe_id, queue_id;
2827 	struct amdgpu_ring *ring;
2828 
2829 	DRM_DEBUG("IH: CP EOP\n");
2830 	me_id = (entry->ring_id & 0x0c) >> 2;
2831 	pipe_id = (entry->ring_id & 0x03) >> 0;
2832 	queue_id = (entry->ring_id & 0x70) >> 4;
2833 
2834 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2835 
2836 	if (xcc_id == -EINVAL)
2837 		return -EINVAL;
2838 
2839 	switch (me_id) {
2840 	case 0:
2841 	case 1:
2842 	case 2:
2843 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2844 			ring = &adev->gfx.compute_ring
2845 					[i +
2846 					 xcc_id * adev->gfx.num_compute_rings];
2847 			/* Per-queue interrupt is supported for MEC starting from VI.
2848 			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
2849 			 */
2850 
2851 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
2852 				amdgpu_fence_process(ring);
2853 		}
2854 		break;
2855 	}
2856 	return 0;
2857 }
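
/*
 * Illustrative sketch (not part of the driver): the EOP and fault
 * handlers above decode me/pipe/queue from the same ring_id bit layout.
 * A shared decode helper would make that layout explicit; the example
 * value in the comment is made up.
 */
static void __maybe_unused
gfx_v9_4_3_decode_ring_id(u8 ring_id, u8 *me_id, u8 *pipe_id, u8 *queue_id)
{
	/* e.g. ring_id 0x26 -> me 1, pipe 2, queue 2 */
	*me_id = (ring_id & 0x0c) >> 2;
	*pipe_id = (ring_id & 0x03) >> 0;
	*queue_id = (ring_id & 0x70) >> 4;
}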
2858 
2859 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
2860 			   struct amdgpu_iv_entry *entry)
2861 {
2862 	u8 me_id, pipe_id, queue_id;
2863 	struct amdgpu_ring *ring;
2864 	int i, xcc_id;
2865 
2866 	me_id = (entry->ring_id & 0x0c) >> 2;
2867 	pipe_id = (entry->ring_id & 0x03) >> 0;
2868 	queue_id = (entry->ring_id & 0x70) >> 4;
2869 
2870 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2871 
2872 	if (xcc_id == -EINVAL)
2873 		return;
2874 
2875 	switch (me_id) {
2876 	case 0:
2877 	case 1:
2878 	case 2:
2879 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2880 			ring = &adev->gfx.compute_ring
2881 					[i +
2882 					 xcc_id * adev->gfx.num_compute_rings];
2883 			if (ring->me == me_id && ring->pipe == pipe_id &&
2884 			    ring->queue == queue_id)
2885 				drm_sched_fault(&ring->sched);
2886 		}
2887 		break;
2888 	}
2889 }
2890 
2891 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
2892 				 struct amdgpu_irq_src *source,
2893 				 struct amdgpu_iv_entry *entry)
2894 {
2895 	DRM_ERROR("Illegal register access in command stream\n");
2896 	gfx_v9_4_3_fault(adev, entry);
2897 	return 0;
2898 }
2899 
2900 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
2901 				  struct amdgpu_irq_src *source,
2902 				  struct amdgpu_iv_entry *entry)
2903 {
2904 	DRM_ERROR("Illegal instruction in command stream\n");
2905 	gfx_v9_4_3_fault(adev, entry);
2906 	return 0;
2907 }
2908 
2909 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
2910 {
2911 	const unsigned int cp_coher_cntl =
2912 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
2913 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
2914 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
2915 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
2916 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
2917 
2918 	/* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
2919 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
2920 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
2921 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
2922 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
2923 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
2924 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
2925 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
2926 }
2927 
2928 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
2929 					uint32_t pipe, bool enable)
2930 {
2931 	struct amdgpu_device *adev = ring->adev;
2932 	uint32_t val;
2933 	uint32_t wcl_cs_reg;
2934 
2935 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
2936 	val = enable ? 0x1 : 0x7f;
2937 
2938 	switch (pipe) {
2939 	case 0:
2940 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
2941 		break;
2942 	case 1:
2943 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
2944 		break;
2945 	case 2:
2946 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
2947 		break;
2948 	case 3:
2949 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
2950 		break;
2951 	default:
2952 		DRM_DEBUG("invalid pipe %d\n", pipe);
2953 		return;
2954 	}
2955 
2956 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
2957 }
2958 
2959 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
2960 {
2961 	struct amdgpu_device *adev = ring->adev;
2962 	uint32_t val;
2963 	int i;
2964 
2965 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
2966 	 * limit the number of gfx waves. Setting it to 0x1f (5 bits set)
2967 	 * ensures gfx only gets around 25% of the gpu resources.
2968 	 */
2969 	val = enable ? 0x1f : 0x07ffffff;
2970 	amdgpu_ring_emit_wreg(ring,
2971 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
2972 			      val);
2973 
2974 	/* Restrict waves for normal/low-priority compute queues as well,
2975 	 * to get the best QoS for high-priority compute jobs.
2976 	 *
2977 	 * amdgpu controls only the 1st ME (CS pipes 0-3).
2978 	 */
2979 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2980 		if (i != ring->pipe)
2981 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
2983 	}
2984 }
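
/*
 * Illustrative sketch (not part of the driver): against the 7-bit
 * full-scale value 0x7f, the 0x1f multiplier written above comes to
 * roughly a quarter, which is where the "around 25%" figure in the
 * comment above originates.
 */
static inline unsigned int gfx_v9_4_3_wave_limit_percent_example(void)
{
	/* 0x1f / 0x7f = 31 / 127 ~= 24% */
	return (0x1f * 100) / 0x7f;
}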
2985 
2986 enum amdgpu_gfx_cp_ras_mem_id {
2987 	AMDGPU_GFX_CP_MEM1 = 1,
2988 	AMDGPU_GFX_CP_MEM2,
2989 	AMDGPU_GFX_CP_MEM3,
2990 	AMDGPU_GFX_CP_MEM4,
2991 	AMDGPU_GFX_CP_MEM5,
2992 };
2993 
2994 enum amdgpu_gfx_gcea_ras_mem_id {
2995 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
2996 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
2997 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
2998 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
2999 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3000 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3001 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3002 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3003 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3004 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3005 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3006 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3007 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3008 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3009 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3010 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3011 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3012 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3013 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3014 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3015 };
3016 
3017 enum amdgpu_gfx_gc_cane_ras_mem_id {
3018 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3019 };
3020 
3021 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3022 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3023 };
3024 
3025 enum amdgpu_gfx_gds_ras_mem_id {
3026 	AMDGPU_GFX_GDS_MEM0 = 0,
3027 };
3028 
3029 enum amdgpu_gfx_lds_ras_mem_id {
3030 	AMDGPU_GFX_LDS_BANK0 = 0,
3031 	AMDGPU_GFX_LDS_BANK1,
3032 	AMDGPU_GFX_LDS_BANK2,
3033 	AMDGPU_GFX_LDS_BANK3,
3034 	AMDGPU_GFX_LDS_BANK4,
3035 	AMDGPU_GFX_LDS_BANK5,
3036 	AMDGPU_GFX_LDS_BANK6,
3037 	AMDGPU_GFX_LDS_BANK7,
3038 	AMDGPU_GFX_LDS_BANK8,
3039 	AMDGPU_GFX_LDS_BANK9,
3040 	AMDGPU_GFX_LDS_BANK10,
3041 	AMDGPU_GFX_LDS_BANK11,
3042 	AMDGPU_GFX_LDS_BANK12,
3043 	AMDGPU_GFX_LDS_BANK13,
3044 	AMDGPU_GFX_LDS_BANK14,
3045 	AMDGPU_GFX_LDS_BANK15,
3046 	AMDGPU_GFX_LDS_BANK16,
3047 	AMDGPU_GFX_LDS_BANK17,
3048 	AMDGPU_GFX_LDS_BANK18,
3049 	AMDGPU_GFX_LDS_BANK19,
3050 	AMDGPU_GFX_LDS_BANK20,
3051 	AMDGPU_GFX_LDS_BANK21,
3052 	AMDGPU_GFX_LDS_BANK22,
3053 	AMDGPU_GFX_LDS_BANK23,
3054 	AMDGPU_GFX_LDS_BANK24,
3055 	AMDGPU_GFX_LDS_BANK25,
3056 	AMDGPU_GFX_LDS_BANK26,
3057 	AMDGPU_GFX_LDS_BANK27,
3058 	AMDGPU_GFX_LDS_BANK28,
3059 	AMDGPU_GFX_LDS_BANK29,
3060 	AMDGPU_GFX_LDS_BANK30,
3061 	AMDGPU_GFX_LDS_BANK31,
3062 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3063 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3064 };
3065 
3066 enum amdgpu_gfx_rlc_ras_mem_id {
3067 	AMDGPU_GFX_RLC_GPMF32 = 1,
3068 	AMDGPU_GFX_RLC_RLCVF32,
3069 	AMDGPU_GFX_RLC_SCRATCH,
3070 	AMDGPU_GFX_RLC_SRM_ARAM,
3071 	AMDGPU_GFX_RLC_SRM_DRAM,
3072 	AMDGPU_GFX_RLC_TCTAG,
3073 	AMDGPU_GFX_RLC_SPM_SE,
3074 	AMDGPU_GFX_RLC_SPM_GRBMT,
3075 };
3076 
3077 enum amdgpu_gfx_sp_ras_mem_id {
3078 	AMDGPU_GFX_SP_SIMDID0 = 0,
3079 };
3080 
3081 enum amdgpu_gfx_spi_ras_mem_id {
3082 	AMDGPU_GFX_SPI_MEM0 = 0,
3083 	AMDGPU_GFX_SPI_MEM1,
3084 	AMDGPU_GFX_SPI_MEM2,
3085 	AMDGPU_GFX_SPI_MEM3,
3086 };
3087 
3088 enum amdgpu_gfx_sqc_ras_mem_id {
3089 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3090 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3091 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3092 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3093 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3094 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3095 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3096 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3097 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3098 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3099 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3100 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3101 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3102 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3103 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3104 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3105 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3106 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3107 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3108 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3109 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3110 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3111 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3112 };
3113 
3114 enum amdgpu_gfx_sq_ras_mem_id {
3115 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3116 	AMDGPU_GFX_SQ_SGPR_MEM1,
3117 	AMDGPU_GFX_SQ_SGPR_MEM2,
3118 	AMDGPU_GFX_SQ_SGPR_MEM3,
3119 };
3120 
3121 enum amdgpu_gfx_ta_ras_mem_id {
3122 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3123 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3124 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3125 	AMDGPU_GFX_TA_FSX_LFIFO,
3126 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3127 };
3128 
3129 enum amdgpu_gfx_tcc_ras_mem_id {
3130 	AMDGPU_GFX_TCC_MEM1 = 1,
3131 };
3132 
3133 enum amdgpu_gfx_tca_ras_mem_id {
3134 	AMDGPU_GFX_TCA_MEM1 = 1,
3135 };
3136 
3137 enum amdgpu_gfx_tci_ras_mem_id {
3138 	AMDGPU_GFX_TCIW_MEM = 1,
3139 };
3140 
3141 enum amdgpu_gfx_tcp_ras_mem_id {
3142 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3143 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3144 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3145 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3146 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3147 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3148 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3149 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3150 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3151 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3152 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3153 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3154 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3155 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3156 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3157 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3158 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3159 	AMDGPU_GFX_TCP_VM_FIFO,
3160 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3161 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3162 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3163 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3164 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3165 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3166 	AMDGPU_GFX_TCP_CMD_FIFO,
3167 };
3168 
3169 enum amdgpu_gfx_td_ras_mem_id {
3170 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3171 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3172 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3173 };
3174 
3175 enum amdgpu_gfx_tcx_ras_mem_id {
3176 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3177 	AMDGPU_GFX_TCX_FIFOD1,
3178 	AMDGPU_GFX_TCX_FIFOD2,
3179 	AMDGPU_GFX_TCX_FIFOD3,
3180 	AMDGPU_GFX_TCX_FIFOD4,
3181 	AMDGPU_GFX_TCX_FIFOD5,
3182 	AMDGPU_GFX_TCX_FIFOD6,
3183 	AMDGPU_GFX_TCX_FIFOD7,
3184 	AMDGPU_GFX_TCX_FIFOB0,
3185 	AMDGPU_GFX_TCX_FIFOB1,
3186 	AMDGPU_GFX_TCX_FIFOB2,
3187 	AMDGPU_GFX_TCX_FIFOB3,
3188 	AMDGPU_GFX_TCX_FIFOB4,
3189 	AMDGPU_GFX_TCX_FIFOB5,
3190 	AMDGPU_GFX_TCX_FIFOB6,
3191 	AMDGPU_GFX_TCX_FIFOB7,
3192 	AMDGPU_GFX_TCX_FIFOA0,
3193 	AMDGPU_GFX_TCX_FIFOA1,
3194 	AMDGPU_GFX_TCX_FIFOA2,
3195 	AMDGPU_GFX_TCX_FIFOA3,
3196 	AMDGPU_GFX_TCX_FIFOA4,
3197 	AMDGPU_GFX_TCX_FIFOA5,
3198 	AMDGPU_GFX_TCX_FIFOA6,
3199 	AMDGPU_GFX_TCX_FIFOA7,
3200 	AMDGPU_GFX_TCX_CFIFO0,
3201 	AMDGPU_GFX_TCX_CFIFO1,
3202 	AMDGPU_GFX_TCX_CFIFO2,
3203 	AMDGPU_GFX_TCX_CFIFO3,
3204 	AMDGPU_GFX_TCX_CFIFO4,
3205 	AMDGPU_GFX_TCX_CFIFO5,
3206 	AMDGPU_GFX_TCX_CFIFO6,
3207 	AMDGPU_GFX_TCX_CFIFO7,
3208 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3209 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3210 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3211 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3212 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3213 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3214 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3215 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3216 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3217 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3218 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3219 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3220 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3221 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3222 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3223 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3224 	AMDGPU_GFX_TCX_DST_FIFOA0,
3225 	AMDGPU_GFX_TCX_DST_FIFOA1,
3226 	AMDGPU_GFX_TCX_DST_FIFOA2,
3227 	AMDGPU_GFX_TCX_DST_FIFOA3,
3228 	AMDGPU_GFX_TCX_DST_FIFOA4,
3229 	AMDGPU_GFX_TCX_DST_FIFOA5,
3230 	AMDGPU_GFX_TCX_DST_FIFOA6,
3231 	AMDGPU_GFX_TCX_DST_FIFOA7,
3232 	AMDGPU_GFX_TCX_DST_FIFOB0,
3233 	AMDGPU_GFX_TCX_DST_FIFOB1,
3234 	AMDGPU_GFX_TCX_DST_FIFOB2,
3235 	AMDGPU_GFX_TCX_DST_FIFOB3,
3236 	AMDGPU_GFX_TCX_DST_FIFOB4,
3237 	AMDGPU_GFX_TCX_DST_FIFOB5,
3238 	AMDGPU_GFX_TCX_DST_FIFOB6,
3239 	AMDGPU_GFX_TCX_DST_FIFOB7,
3240 	AMDGPU_GFX_TCX_DST_FIFOD0,
3241 	AMDGPU_GFX_TCX_DST_FIFOD1,
3242 	AMDGPU_GFX_TCX_DST_FIFOD2,
3243 	AMDGPU_GFX_TCX_DST_FIFOD3,
3244 	AMDGPU_GFX_TCX_DST_FIFOD4,
3245 	AMDGPU_GFX_TCX_DST_FIFOD5,
3246 	AMDGPU_GFX_TCX_DST_FIFOD6,
3247 	AMDGPU_GFX_TCX_DST_FIFOD7,
3248 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3249 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3250 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3251 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3252 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3253 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3254 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3255 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3256 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3257 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3258 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3259 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3260 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3261 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3262 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3263 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3264 };
3265 
3266 enum amdgpu_gfx_atc_l2_ras_mem_id {
3267 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3268 };
3269 
3270 enum amdgpu_gfx_utcl2_ras_mem_id {
3271 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3272 };
3273 
3274 enum amdgpu_gfx_vml2_ras_mem_id {
3275 	AMDGPU_GFX_VML2_MEM0 = 0,
3276 };
3277 
3278 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3279 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3280 };
3281 
3282 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3283 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3284 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3285 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3286 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3287 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3288 };
3289 
3290 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3291 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3292 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3293 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3294 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3295 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3296 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3297 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3298 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3299 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3300 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3301 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3302 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3303 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3304 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3305 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3306 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3307 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3308 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3309 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3310 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3311 };
3312 
3313 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3314 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3315 };
3316 
3317 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3318 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3319 };
3320 
3321 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3322 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3323 };
3324 
3325 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3326 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3327 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3328 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3329 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3330 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3331 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3332 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3333 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3334 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3335 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3336 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3337 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3338 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3339 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3340 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3341 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3342 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
3343 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
3344 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
3345 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
3346 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
3347 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
3348 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
3349 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
3350 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
3351 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
3352 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
3353 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
3354 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
3355 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
3356 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
3357 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
3358 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
3359 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
3360 };
3361 
3362 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
3363 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
3364 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
3365 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
3366 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
3367 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
3368 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
3369 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
3370 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
3371 };
3372 
3373 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
3374 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
3375 };
3376 
3377 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
3378 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
3379 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
3380 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
3381 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
3382 };
3383 
3384 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
3385 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
3386 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
3387 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
3388 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
3389 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
3390 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
3391 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
3392 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
3393 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
3394 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
3395 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
3396 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
3397 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
3398 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
3399 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
3400 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
3401 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
3402 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
3403 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
3404 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
3405 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
3406 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
3407 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
3408 };
3409 
3410 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
3411 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
3412 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
3413 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
3414 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
3415 };
3416 
3417 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
3418 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
3419 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
3420 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
3421 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
3422 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
3423 };
3424 
3425 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
3426 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
3427 };
3428 
3429 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
3430 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
3431 };
3432 
3433 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
3434 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
3435 };
3436 
3437 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
3438 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
3439 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
3440 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
3441 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
3442 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
3443 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
3444 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
3445 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
3446 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
3447 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
3448 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
3449 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
3450 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
3451 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
3452 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
3453 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
3454 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
3455 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
3456 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
3457 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
3458 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
3459 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
3460 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
3461 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
3462 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
3463 };
3464 
3465 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
3466 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
3467 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
3468 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
3469 };
3470 
3471 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
3472 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
3473 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
3474 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
3475 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
3476 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
3477 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
3478 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
3479 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
3480 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
3481 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
3482 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
3483 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
3484 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
3485 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
3486 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
3487 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
3488 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
3489 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
3490 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
3491 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
3492 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
3493 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
3494 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
3495 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
3496 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
3497 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
3498 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
3499 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
3500 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
3501 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
3502 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
3503 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
3504 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
3505 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
3506 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
3507 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
3508 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
3509 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
3510 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
3511 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
3512 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
3513 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
3514 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
3515 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
3516 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
3517 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
3518 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
3519 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
3520 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
3521 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
3522 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
3523 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
3524 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
3525 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
3526 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
3527 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
3528 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
3529 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
3530 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
3531 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
3532 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
3533 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
3534 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
3535 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
3536 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
3537 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
3538 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
3539 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
3540 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
3541 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
3542 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
3543 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
3544 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
3545 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
3546 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
3547 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
3548 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
3549 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
3550 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
3551 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
3552 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
3553 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
3554 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
3555 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
3556 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
3557 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
3558 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
3559 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
3560 };
3561 
3562 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
3563 	{AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
3564 };
3565 
3566 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
3567 	{AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
3568 };
3569 
3570 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
3571 	{AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
3572 };
3573 
3574 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
3575 	{AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
3576 };
3577 
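/*
 * Per-block memory ID name tables, collected into a single lookup array.
 * The array is indexed by the mem_id_type field of the register lists
 * below, so the entry order here must match the memory type enumeration
 * (AMDGPU_GFX_MEM_TYPE_NUM entries in total).
 */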
3578 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
3579 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
3580 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
3581 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
3582 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
3583 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
3584 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
3585 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
3586 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
3587 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
3588 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
3589 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
3590 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
3591 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
3592 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
3593 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
3594 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
3595 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
3596 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
3597 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
3598 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
3599 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
3600 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
3601 };
3602 
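/*
 * Correctable error (CE) status register list.  Each entry bundles the
 * LO/HI error status register pair, the number of register instances to
 * walk, the validity flags and a block name for logging, followed by the
 * memory ID table used to decode sub-block names and the shader engine
 * count (se_num) to iterate over.
 */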
3603 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
3604 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
3605 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
3606 	    AMDGPU_GFX_RLC_MEM, 1},
3607 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
3608 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
3609 	    AMDGPU_GFX_CP_MEM, 1},
3610 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
3611 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
3612 	    AMDGPU_GFX_CP_MEM, 1},
3613 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
3614 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
3615 	    AMDGPU_GFX_CP_MEM, 1},
3616 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
3617 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
3618 	    AMDGPU_GFX_GDS_MEM, 1},
3619 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
3620 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
3621 	    AMDGPU_GFX_GC_CANE_MEM, 1},
3622 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
3623 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
3624 	    AMDGPU_GFX_SPI_MEM, 8},
3625 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
3626 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
3627 	    AMDGPU_GFX_SP_MEM, 1},
3628 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
3629 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
3630 	    AMDGPU_GFX_SP_MEM, 1},
3631 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
3632 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
3633 	    AMDGPU_GFX_SQ_MEM, 8},
3634 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
3635 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
3636 	    AMDGPU_GFX_SQC_MEM, 8},
3637 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
3638 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
3639 	    AMDGPU_GFX_TCX_MEM, 1},
3640 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
3641 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
3642 	    AMDGPU_GFX_TCC_MEM, 1},
3643 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
3644 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
3645 	    AMDGPU_GFX_TA_MEM, 8},
3646 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
3647 	    31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3648 	    AMDGPU_GFX_TCI_MEM, 1},
3649 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
3650 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
3651 	    AMDGPU_GFX_TCP_MEM, 8},
3652 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
3653 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
3654 	    AMDGPU_GFX_TD_MEM, 8},
3655 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
3656 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
3657 	    AMDGPU_GFX_GCEA_MEM, 1},
3658 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
3659 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
3660 	    AMDGPU_GFX_LDS_MEM, 1},
3661 };
3662 
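/*
 * Uncorrectable error (UE) counterpart of the CE list above.  TCA only
 * appears here (it has no CE entry), so this list is one entry longer
 * than the CE list and is iterated with its own bounds in the
 * query/reset loops below.
 */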
3663 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
3664 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
3665 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
3666 	    AMDGPU_GFX_RLC_MEM, 1},
3667 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
3668 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
3669 	    AMDGPU_GFX_CP_MEM, 1},
3670 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
3671 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
3672 	    AMDGPU_GFX_CP_MEM, 1},
3673 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
3674 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
3675 	    AMDGPU_GFX_CP_MEM, 1},
3676 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
3677 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
3678 	    AMDGPU_GFX_GDS_MEM, 1},
3679 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
3680 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
3681 	    AMDGPU_GFX_GC_CANE_MEM, 1},
3682 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
3683 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
3684 	    AMDGPU_GFX_SPI_MEM, 8},
3685 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
3686 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
3687 	    AMDGPU_GFX_SP_MEM, 1},
3688 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
3689 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
3690 	    AMDGPU_GFX_SP_MEM, 1},
3691 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
3692 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
3693 	    AMDGPU_GFX_SQ_MEM, 8},
3694 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
3695 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
3696 	    AMDGPU_GFX_SQC_MEM, 8},
3697 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
3698 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
3699 	    AMDGPU_GFX_TCX_MEM, 1},
3700 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
3701 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
3702 	    AMDGPU_GFX_TCC_MEM, 1},
3703 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
3704 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
3705 	    AMDGPU_GFX_TA_MEM, 8},
3706 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
3707 	    31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3708 	    AMDGPU_GFX_TCI_MEM, 1},
3709 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
3710 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
3711 	    AMDGPU_GFX_TCP_MEM, 8},
3712 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
3713 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
3714 	    AMDGPU_GFX_TD_MEM, 8},
3715 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
3716 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
3717 	    AMDGPU_GFX_TCA_MEM, 1},
3718 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
3719 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
3720 	    AMDGPU_GFX_GCEA_MEM, 1},
3721 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
3722 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
3723 	    AMDGPU_GFX_LDS_MEM, 1},
3724 };
3725 
3726 static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = {
3727 	SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
3728 };
3729 
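/*
 * Walk the CE and UE register lists for one XCC instance, each with its
 * own bounds, and accumulate the error counts into the ras_err_data
 * supplied by the caller.  GRBM is steered to each SE/instance
 * combination before the reads and restored to broadcast mode
 * (0xffffffff selects) afterwards.
 */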
3730 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
3731 					void *ras_error_status, int xcc_id)
3732 {
3733 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
3734 	unsigned long ce_count = 0, ue_count = 0;
3735 	uint32_t i, j, k;
3736 
3737 	mutex_lock(&adev->grbm_idx_mutex);
3738 
	/* query ce counts */
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);
			}
		}
	}

	/* query ue counts; the ue list carries an extra TCA entry, so it
	 * is walked with its own bounds instead of reusing the ce index
	 */
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}
3767 
3768 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3769 			xcc_id);
3770 	mutex_unlock(&adev->grbm_idx_mutex);
3771 
	/* the caller is expected to have initialized
	 * err_data->ue_count and err_data->ce_count
	 */
3775 	err_data->ce_count += ce_count;
3776 	err_data->ue_count += ue_count;
3777 }
3778 
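/*
 * Clear the CE and UE error counters for one XCC instance, mirroring the
 * SE/instance selection logic of the query path above.
 */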
3779 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
3780 					void *ras_error_status, int xcc_id)
3781 {
3782 	uint32_t i, j, k;
3783 
3784 	mutex_lock(&adev->grbm_idx_mutex);
3785 
	/* reset ce counts */
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	/* reset ue counts; walked separately for the same reason as the
	 * query path above
	 */
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}
3806 
3807 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3808 			xcc_id);
3809 	mutex_unlock(&adev->grbm_idx_mutex);
3810 }
3811 
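/*
 * Check the GCEA error status registers of one XCC instance for SDP
 * read/write response or data parity errors, warn if any are latched,
 * then clear the status by setting CLEAR_ERROR_STATUS.
 */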
3812 static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev,
3813 					int xcc_id)
3814 {
3815 	uint32_t i, j;
3816 	uint32_t reg_value;
3817 
3818 	mutex_lock(&adev->grbm_idx_mutex);
3819 
3820 	for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
3821 		for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
3822 			gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
3823 			reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3824 					regGCEA_ERR_STATUS);
3825 			if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
3826 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
3827 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
3828 				dev_warn(adev->dev,
3829 					"GCEA err detected at instance: %d, status: 0x%x!\n",
3830 					j, reg_value);
3831 			}
3832 			/* clear after read */
3833 			reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
3834 						  CLEAR_ERROR_STATUS, 0x1);
3835 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS,
3836 					reg_value);
3837 		}
3838 	}
3839 
3840 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3841 			xcc_id);
3842 	mutex_unlock(&adev->grbm_idx_mutex);
3843 }
3844 
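/*
 * Check the UTCL2, VML2 and VML2 walker memory ECC status registers of
 * one XCC instance; a write of 0x3 clears the latched status bits.
 */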
3845 static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
3846 					int xcc_id)
3847 {
3848 	uint32_t data;
3849 
3850 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS);
3851 	if (data) {
3852 		dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
3853 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
3854 	}
3855 
3856 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS);
3857 	if (data) {
3858 		dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
3859 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
3860 	}
3861 
3862 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3863 				regVML2_WALKER_MEM_ECC_STATUS);
3864 	if (data) {
3865 		dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
3866 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS,
3867 				0x3);
3868 	}
3869 }
3870 
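/*
 * Dump the state of every wave flagged in the SQ_TIMEOUT_STATUS mask:
 * each set bit identifies one hung wave, whose SIMD/wave slot is decoded
 * so the wave registers (status, pc, exec, inst, ib_sts) can be read
 * through the indirect wave interface and logged.
 */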
3871 static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev,
3872 					uint32_t status, int xcc_id)
3873 {
3874 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
3875 	uint32_t i, simd, wave;
3876 	uint32_t wave_status;
3877 	uint32_t wave_pc_lo, wave_pc_hi;
3878 	uint32_t wave_exec_lo, wave_exec_hi;
3879 	uint32_t wave_inst_dw0, wave_inst_dw1;
3880 	uint32_t wave_ib_sts;
3881 
3882 	for (i = 0; i < 32; i++) {
		if (!((1 << i) & status))
3884 			continue;
3885 
3886 		simd = i / cu_info->max_waves_per_simd;
3887 		wave = i % cu_info->max_waves_per_simd;
3888 
3889 		wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
3890 		wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
3891 		wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
3892 		wave_exec_lo =
3893 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
3894 		wave_exec_hi =
3895 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
3896 		wave_inst_dw0 =
3897 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
3898 		wave_inst_dw1 =
3899 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
3900 		wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
3901 
3902 		dev_info(
3903 			adev->dev,
3904 			"\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
3905 			simd, wave, wave_status,
3906 			((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
3907 			((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
3908 			((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
3909 			wave_ib_sts);
3910 	}
3911 }
3912 
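/*
 * Scan SQ_TIMEOUT_STATUS across every SE/SH/CU of one XCC instance, log
 * any compute unit that reports a watchdog timeout, and clear the status
 * afterwards.
 */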
3913 static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
3914 					int xcc_id)
3915 {
3916 	uint32_t se_idx, sh_idx, cu_idx;
3917 	uint32_t status;
3918 
3919 	mutex_lock(&adev->grbm_idx_mutex);
3920 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
3921 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
3922 			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
3923 				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
3924 							cu_idx, xcc_id);
3925 				status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3926 						      regSQ_TIMEOUT_STATUS);
3927 				if (status != 0) {
3928 					dev_info(
3929 						adev->dev,
3930 						"GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
3931 						se_idx, sh_idx, cu_idx);
3932 					gfx_v9_4_3_log_cu_timeout_status(
3933 						adev, status, xcc_id);
3934 				}
3935 				/* clear old status */
3936 				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3937 						regSQ_TIMEOUT_STATUS, 0);
3938 			}
3939 		}
3940 	}
3941 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3942 			xcc_id);
3943 	mutex_unlock(&adev->grbm_idx_mutex);
3944 }
3945 
3946 static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
3947 					void *ras_error_status, int xcc_id)
3948 {
3949 	gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id);
3950 	gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
3951 	gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
3952 }
3953 
3954 static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
3955 					int xcc_id)
3956 {
3957 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
3958 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
3959 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
3960 }
3961 
3962 static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev,
3963 					int xcc_id)
3964 {
3965 	uint32_t i, j;
3966 	uint32_t value;
3967 
3968 	mutex_lock(&adev->grbm_idx_mutex);
3969 	for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
3970 		for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
3971 			gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
3972 			value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS);
3973 			value = REG_SET_FIELD(value, GCEA_ERR_STATUS,
3974 						CLEAR_ERROR_STATUS, 0x1);
3975 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value);
3976 		}
3977 	}
3978 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3979 			xcc_id);
3980 	mutex_unlock(&adev->grbm_idx_mutex);
3981 }
3982 
3983 static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
3984 					int xcc_id)
3985 {
3986 	uint32_t se_idx, sh_idx, cu_idx;
3987 
3988 	mutex_lock(&adev->grbm_idx_mutex);
3989 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
3990 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
3991 			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
3992 				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
3993 							cu_idx, xcc_id);
3994 				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3995 						regSQ_TIMEOUT_STATUS, 0);
3996 			}
3997 		}
3998 	}
3999 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4000 			xcc_id);
4001 	mutex_unlock(&adev->grbm_idx_mutex);
4002 }
4003 
4004 static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
4005 					void *ras_error_status, int xcc_id)
4006 {
4007 	gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
4008 	gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id);
4009 	gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
4010 }
4011 
4012 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4013 					void *ras_error_status)
4014 {
4015 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4016 			gfx_v9_4_3_inst_query_ras_err_count);
4017 }
4018 
4019 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4020 {
4021 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4022 }
4023 
4024 static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev)
4025 {
4026 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status);
4027 }
4028 
4029 static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
4030 {
4031 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
4032 }
4033 
4034 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4035 	.name = "gfx_v9_4_3",
4036 	.early_init = gfx_v9_4_3_early_init,
4037 	.late_init = gfx_v9_4_3_late_init,
4038 	.sw_init = gfx_v9_4_3_sw_init,
4039 	.sw_fini = gfx_v9_4_3_sw_fini,
4040 	.hw_init = gfx_v9_4_3_hw_init,
4041 	.hw_fini = gfx_v9_4_3_hw_fini,
4042 	.suspend = gfx_v9_4_3_suspend,
4043 	.resume = gfx_v9_4_3_resume,
4044 	.is_idle = gfx_v9_4_3_is_idle,
4045 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4046 	.soft_reset = gfx_v9_4_3_soft_reset,
4047 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4048 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4049 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4050 };
4051 
4052 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4053 	.type = AMDGPU_RING_TYPE_COMPUTE,
4054 	.align_mask = 0xff,
4055 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4056 	.support_64bit_ptrs = true,
4057 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4058 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4059 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4060 	.emit_frame_size =
4061 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4062 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4063 		5 + /* hdp invalidate */
4064 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4065 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4066 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4067 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4068 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4069 		7 + /* gfx_v9_4_3_emit_mem_sync */
4070 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4071 		15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4072 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4073 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4074 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4075 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4076 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4077 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4078 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4079 	.test_ring = gfx_v9_4_3_ring_test_ring,
4080 	.test_ib = gfx_v9_4_3_ring_test_ib,
4081 	.insert_nop = amdgpu_ring_insert_nop,
4082 	.pad_ib = amdgpu_ring_generic_pad_ib,
4083 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4084 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4085 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4086 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4087 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4088 };
4089 
4090 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4091 	.type = AMDGPU_RING_TYPE_KIQ,
4092 	.align_mask = 0xff,
4093 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4094 	.support_64bit_ptrs = true,
4095 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4096 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4097 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4098 	.emit_frame_size =
4099 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4100 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4101 		5 + /* hdp invalidate */
4102 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4103 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4104 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4105 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4106 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4107 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4108 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4109 	.test_ring = gfx_v9_4_3_ring_test_ring,
4110 	.insert_nop = amdgpu_ring_insert_nop,
4111 	.pad_ib = amdgpu_ring_generic_pad_ib,
4112 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4113 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4114 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4115 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4116 };
4117 
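/*
 * Hook up the KIQ and compute ring callbacks for every XCC: compute
 * rings are stored contiguously per XCC, hence the
 * j + i * num_compute_rings indexing.
 */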
4118 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4119 {
4120 	int i, j, num_xcc;
4121 
4122 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4123 	for (i = 0; i < num_xcc; i++) {
4124 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4125 
4126 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4127 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4128 					= &gfx_v9_4_3_ring_funcs_compute;
4129 	}
4130 }
4131 
4132 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4133 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4134 	.process = gfx_v9_4_3_eop_irq,
4135 };
4136 
4137 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4138 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4139 	.process = gfx_v9_4_3_priv_reg_irq,
4140 };
4141 
4142 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4143 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4144 	.process = gfx_v9_4_3_priv_inst_irq,
4145 };
4146 
4147 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4148 {
4149 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4150 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4151 
4152 	adev->gfx.priv_reg_irq.num_types = 1;
4153 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4154 
4155 	adev->gfx.priv_inst_irq.num_types = 1;
4156 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4157 }
4158 
4159 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4160 {
4161 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4162 }
4163 
4164 
4165 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4166 {
	/* init asic gds info */
4168 	switch (adev->ip_versions[GC_HWIP][0]) {
4169 	case IP_VERSION(9, 4, 3):
		/* 9.4.3 removed all of the GDS internal memory;
		 * only GWS opcodes, such as barrier and semaphore,
		 * are supported in the kernel */
4173 		adev->gds.gds_size = 0;
4174 		break;
4175 	default:
4176 		adev->gds.gds_size = 0x10000;
4177 		break;
4178 	}
4179 
4180 	switch (adev->ip_versions[GC_HWIP][0]) {
4181 	case IP_VERSION(9, 4, 3):
4182 		/* deprecated for 9.4.3, no usage at all */
4183 		adev->gds.gds_compute_max_wave_id = 0;
4184 		break;
4185 	default:
4186 		/* this really depends on the chip */
4187 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4188 		break;
4189 	}
4190 
4191 	adev->gds.gws_size = 64;
4192 	adev->gds.oa_size = 16;
4193 }
4194 
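/*
 * Mark the CUs in @bitmap inactive for the currently selected SE/SH via
 * GC_USER_SHADER_ARRAY_CONFIG; a zero bitmap leaves the register
 * untouched.
 */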
4195 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4196 						 u32 bitmap)
4197 {
4198 	u32 data;
4199 
4200 	if (!bitmap)
4201 		return;
4202 
4203 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4204 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4205 
4206 	WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
4207 }
4208 
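/*
 * Compute the active CU mask for the currently selected SE/SH: the
 * fuse-level (CC) and user-requested (GC_USER) inactive bitmaps are OR'ed
 * together, then inverted and limited to max_cu_per_sh bits.
 */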
4209 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
4210 {
4211 	u32 data, mask;
4212 
4213 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
4214 	data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
4215 
4216 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4217 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4218 
4219 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4220 
4221 	return (~data) & mask;
4222 }
4223 
4224 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4225 				 struct amdgpu_cu_info *cu_info)
4226 {
4227 	int i, j, k, counter, active_cu_number = 0;
4228 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4229 	unsigned disable_masks[4 * 4];
4230 
4231 	if (!adev || !cu_info)
4232 		return -EINVAL;
4233 
	/*
	 * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
	 */
4237 	if (adev->gfx.config.max_shader_engines *
4238 		adev->gfx.config.max_sh_per_se > 16)
4239 		return -EINVAL;
4240 
4241 	amdgpu_gfx_parse_disable_cu(disable_masks,
4242 				    adev->gfx.config.max_shader_engines,
4243 				    adev->gfx.config.max_sh_per_se);
4244 
4245 	mutex_lock(&adev->grbm_idx_mutex);
4246 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4247 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4248 			mask = 1;
4249 			ao_bitmap = 0;
4250 			counter = 0;
4251 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
4252 			gfx_v9_4_3_set_user_cu_inactive_bitmap(
4253 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
4254 			bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
4255 
			/*
			 * The bitmap (and ao_cu_bitmap) in the cu_info structure
			 * is a 4x4 array, which suits Vega ASICs with their 4*2
			 * SE/SH layout.
			 * For Arcturus, however, the SE/SH layout changed to 8*1.
			 * To minimize the impact, the extra SEs are folded into
			 * the existing bitmap array as below:
			 *    SE4,SH0 --> bitmap[0][1]
			 *    SE5,SH0 --> bitmap[1][1]
			 *    SE6,SH0 --> bitmap[2][1]
			 *    SE7,SH0 --> bitmap[3][1]
			 */
4268 			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
4269 
4270 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4271 				if (bitmap & mask) {
4272 					if (counter < adev->gfx.config.max_cu_per_sh)
4273 						ao_bitmap |= mask;
4274 					counter++;
4275 				}
4276 				mask <<= 1;
4277 			}
4278 			active_cu_number += counter;
4279 			if (i < 2 && j < 2)
4280 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4281 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
4282 		}
4283 	}
4284 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4285 				    0);
4286 	mutex_unlock(&adev->grbm_idx_mutex);
4287 
4288 	cu_info->number = active_cu_number;
4289 	cu_info->ao_cu_mask = ao_cu_mask;
4290 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4291 
4292 	return 0;
4293 }
4294 
4295 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4296 	.type = AMD_IP_BLOCK_TYPE_GFX,
4297 	.major = 9,
4298 	.minor = 4,
4299 	.rev = 0,
4300 	.funcs = &gfx_v9_4_3_ip_funcs,
4301 };
4302 
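/*
 * Resume the XCC instances selected in inst_mask for one partition
 * (XCP): constants first, then RLC, then CP, with each stage covering
 * the whole mask before the next stage begins.
 */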
4303 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
4304 {
4305 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4306 	uint32_t tmp_mask;
4307 	int i, r;
4308 
4309 	/* TODO : Initialize golden regs */
4310 	/* gfx_v9_4_3_init_golden_registers(adev); */
4311 
4312 	tmp_mask = inst_mask;
4313 	for_each_inst(i, tmp_mask)
4314 		gfx_v9_4_3_xcc_constants_init(adev, i);
4315 
4316 	tmp_mask = inst_mask;
4317 	for_each_inst(i, tmp_mask) {
4318 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4319 		if (r)
4320 			return r;
4321 	}
4322 
4323 	tmp_mask = inst_mask;
4324 	for_each_inst(i, tmp_mask) {
4325 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4326 		if (r)
4327 			return r;
4328 	}
4329 
4330 	return 0;
4331 }
4332 
4333 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
4334 {
4335 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4336 	int i;
4337 
4338 	for_each_inst(i, inst_mask)
4339 		gfx_v9_4_3_xcc_fini(adev, i);
4340 
4341 	return 0;
4342 }
4343 
4344 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
4345 	.suspend = &gfx_v9_4_3_xcp_suspend,
4346 	.resume = &gfx_v9_4_3_xcp_resume
4347 };
4348 
4349 struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
4350 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
4351 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
4352 	.query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
4353 	.reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
4354 };
4355 
4356 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
4357 	.ras_block = {
4358 		.hw_ops = &gfx_v9_4_3_ras_ops,
4359 	},
4360 };
4361