xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c (revision 83775e158a3d2dc437132ab357ed6c9214ef0ae9)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 
25 #include "amdgpu.h"
26 #include "amdgpu_gfx.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "soc15_common.h"
30 #include "vega10_enum.h"
31 
32 #include "v9_structs.h"
33 
34 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
35 
36 #include "gc/gc_9_4_3_offset.h"
37 #include "gc/gc_9_4_3_sh_mask.h"
38 
39 #include "gfx_v9_4_3.h"
40 #include "amdgpu_xcp.h"
41 
42 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
43 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
44 
45 #define GFX9_MEC_HPD_SIZE 4096
46 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
47 
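/*
 * GOLDEN_GB_ADDR_CONFIG is written verbatim to regGB_ADDR_CONFIG for
 * rev 0 parts in gfx_v9_4_3_init_golden_registers(); the packed fields
 * (NUM_PIPES, NUM_BANKS, NUM_SHADER_ENGINES, ...) are decoded back out
 * of that register in gfx_v9_4_3_gpu_early_init().
 */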
48 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
49 
50 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
51 
52 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
53 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
54 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
55 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
56 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
57 				struct amdgpu_cu_info *cu_info);
58 
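/*
 * KIQ (kernel interface queue) PM4 packet builders. Each helper emits a
 * single packet whose total dword count (header + payload) must match the
 * corresponding *_size field in gfx_v9_4_3_kiq_pm4_funcs below, e.g.
 * SET_RESOURCES is 1 header + 7 payload dwords = 8.
 */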
59 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
60 				uint64_t queue_mask)
61 {
62 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
63 	amdgpu_ring_write(kiq_ring,
64 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
65 		/* vmid_mask:0, queue_type:0 (KIQ) */
66 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
67 	amdgpu_ring_write(kiq_ring,
68 			lower_32_bits(queue_mask));	/* queue mask lo */
69 	amdgpu_ring_write(kiq_ring,
70 			upper_32_bits(queue_mask));	/* queue mask hi */
71 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
72 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
73 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
74 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
75 }
76 
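/*
 * Note on the MAP_QUEUES fields below: compute rings store the hardware
 * ME as 1-based (mec0 is me1) while the packet's ME field is 0-based,
 * hence the (ring->me == 1 ? 0 : 1) translation. eng_sel 0 selects the
 * compute engine; 4 would select GFX, but gfx v9.4.3 exposes no GFX
 * rings, so eng_sel is always 0 here.
 */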
77 static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
78 				 struct amdgpu_ring *ring)
79 {
80 	struct amdgpu_device *adev = kiq_ring->adev;
81 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
82 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
83 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
84 
85 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
87 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
88 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
89 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
90 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
91 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
92 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
93 			 /*queue_type: normal compute queue */
94 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
95 			 /* alloc format: all_on_one_pipe */
96 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
97 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
98 			 /* num_queues: must be 1 */
99 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
100 	amdgpu_ring_write(kiq_ring,
101 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
102 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
103 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
104 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
105 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
106 }
107 
108 static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
109 				   struct amdgpu_ring *ring,
110 				   enum amdgpu_unmap_queues_action action,
111 				   u64 gpu_addr, u64 seq)
112 {
113 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
114 
115 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
116 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
117 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
118 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
119 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
120 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
121 	amdgpu_ring_write(kiq_ring,
122 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
123 
124 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
125 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
126 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
127 		amdgpu_ring_write(kiq_ring, seq);
128 	} else {
129 		amdgpu_ring_write(kiq_ring, 0);
130 		amdgpu_ring_write(kiq_ring, 0);
131 		amdgpu_ring_write(kiq_ring, 0);
132 	}
133 }
134 
135 static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
136 				   struct amdgpu_ring *ring,
137 				   u64 addr,
138 				   u64 seq)
139 {
140 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
141 
142 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
143 	amdgpu_ring_write(kiq_ring,
144 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
145 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
146 			  PACKET3_QUERY_STATUS_COMMAND(2));
147 	/* doorbell offset and engine select */
148 	amdgpu_ring_write(kiq_ring,
149 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
150 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
151 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
152 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
153 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
154 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
155 }
156 
157 static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
158 				uint16_t pasid, uint32_t flush_type,
159 				bool all_hub)
160 {
161 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
162 	amdgpu_ring_write(kiq_ring,
163 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
164 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
165 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
166 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
167 }
168 
169 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
170 	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
171 	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
172 	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
173 	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
174 	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
175 	.set_resources_size = 8,
176 	.map_queues_size = 7,
177 	.unmap_queues_size = 6,
178 	.query_status_size = 7,
179 	.invalidate_tlbs_size = 2,
180 };
181 
182 static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
183 {
184 	int i, num_xcc;
185 
186 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
187 	for (i = 0; i < num_xcc; i++)
188 		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
189 }
190 
191 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
192 {
193 	int i, num_xcc, dev_inst;
194 
195 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
196 	for (i = 0; i < num_xcc; i++) {
197 		dev_inst = GET_INST(GC, i);
198 		if (dev_inst >= 2)
199 			WREG32_SOC15(GC, dev_inst, regGRBM_MCM_ADDR, 0x4);
200 
201 		/* Golden settings applied by driver for ASIC with rev_id 0 */
202 		if (adev->rev_id == 0) {
203 			WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
204 				     GOLDEN_GB_ADDR_CONFIG);
205 
206 			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
207 					      REDUCE_FIFO_DEPTH_BY_2, 2);
208 		}
209 	}
210 }
211 
212 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
213 				       bool wc, uint32_t reg, uint32_t val)
214 {
215 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
216 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
217 				WRITE_DATA_DST_SEL(0) |
218 				(wc ? WR_CONFIRM : 0));
219 	amdgpu_ring_write(ring, reg);
220 	amdgpu_ring_write(ring, 0);
221 	amdgpu_ring_write(ring, val);
222 }
223 
224 static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
225 				  int mem_space, int opt, uint32_t addr0,
226 				  uint32_t addr1, uint32_t ref, uint32_t mask,
227 				  uint32_t inv)
228 {
229 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
230 	amdgpu_ring_write(ring,
231 				 /* memory (1) or register (0) */
232 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
233 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
234 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
235 				 WAIT_REG_MEM_ENGINE(eng_sel)));
236 
237 	if (mem_space)
238 		BUG_ON(addr0 & 0x3); /* Dword align */
239 	amdgpu_ring_write(ring, addr0);
240 	amdgpu_ring_write(ring, addr1);
241 	amdgpu_ring_write(ring, ref);
242 	amdgpu_ring_write(ring, mask);
243 	amdgpu_ring_write(ring, inv); /* poll interval */
244 }
245 
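/*
 * Ring test: the CPU seeds SCRATCH_REG0 with 0xCAFEDEAD through the
 * absolute per-instance MMIO offset, then has the CP overwrite it with
 * 0xDEADBEEF via SET_UCONFIG_REG. The packet carries the XCC-local
 * offset (minus the UCONFIG base) because the CP decodes register
 * addresses within its own XCC.
 */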
246 static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
247 {
248 	uint32_t scratch_reg0_offset, xcc_offset;
249 	struct amdgpu_device *adev = ring->adev;
250 	uint32_t tmp = 0;
251 	unsigned i;
252 	int r;
253 
254 	/* Use register offset which is local to XCC in the packet */
255 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
256 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
257 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
258 
259 	r = amdgpu_ring_alloc(ring, 3);
260 	if (r)
261 		return r;
262 
263 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
264 	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
265 	amdgpu_ring_write(ring, 0xDEADBEEF);
266 	amdgpu_ring_commit(ring);
267 
268 	for (i = 0; i < adev->usec_timeout; i++) {
269 		tmp = RREG32(scratch_reg0_offset);
270 		if (tmp == 0xDEADBEEF)
271 			break;
272 		udelay(1);
273 	}
274 
275 	if (i >= adev->usec_timeout)
276 		r = -ETIMEDOUT;
277 	return r;
278 }
279 
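/*
 * IB test: a five-dword WRITE_DATA packet makes the GPU store 0xDEADBEEF
 * into a writeback slot that the CPU then polls. Sketch of the IB built
 * below (DST_SEL(5) targets memory per the gfx9 packet encoding):
 *
 *   ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);     // header
 *   ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; // memory + confirm
 *   ib.ptr[2..3] = lower/upper 32 bits of gpu_addr; // destination
 *   ib.ptr[4] = 0xDEADBEEF;                         // payload
 */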
280 static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
281 {
282 	struct amdgpu_device *adev = ring->adev;
283 	struct amdgpu_ib ib;
284 	struct dma_fence *f = NULL;
285 
286 	unsigned index;
287 	uint64_t gpu_addr;
288 	uint32_t tmp;
289 	long r;
290 
291 	r = amdgpu_device_wb_get(adev, &index);
292 	if (r)
293 		return r;
294 
295 	gpu_addr = adev->wb.gpu_addr + (index * 4);
296 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
297 	memset(&ib, 0, sizeof(ib));
298 	r = amdgpu_ib_get(adev, NULL, 16,
299 			  AMDGPU_IB_POOL_DIRECT, &ib);
300 	if (r)
301 		goto err1;
302 
303 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
304 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
305 	ib.ptr[2] = lower_32_bits(gpu_addr);
306 	ib.ptr[3] = upper_32_bits(gpu_addr);
307 	ib.ptr[4] = 0xDEADBEEF;
308 	ib.length_dw = 5;
309 
310 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
311 	if (r)
312 		goto err2;
313 
314 	r = dma_fence_wait_timeout(f, false, timeout);
315 	if (r == 0) {
316 		r = -ETIMEDOUT;
317 		goto err2;
318 	} else if (r < 0) {
319 		goto err2;
320 	}
321 
322 	tmp = adev->wb.wb[index];
323 	if (tmp == 0xDEADBEEF)
324 		r = 0;
325 	else
326 		r = -EINVAL;
327 
328 err2:
329 	amdgpu_ib_free(adev, &ib, NULL);
330 	dma_fence_put(f);
331 err1:
332 	amdgpu_device_wb_free(adev, index);
333 	return r;
334 }
335 
336 
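/*
 * Reading the GPU clock counter is a latch-then-read sequence: writing 1
 * to regRLC_CAPTURE_GPU_CLOCK_COUNT snapshots the free-running counter,
 * after which the 64-bit value is assembled from the LSB/MSB register
 * pair. GFXOFF is disabled around the sequence so the RLC registers
 * remain accessible; only XCC instance 0 is sampled.
 */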
337 /* This value might differ per partition */
338 static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
339 {
340 	uint64_t clock;
341 
342 	amdgpu_gfx_off_ctrl(adev, false);
343 	mutex_lock(&adev->gfx.gpu_clock_mutex);
344 	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
345 	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
346 		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
347 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
348 	amdgpu_gfx_off_ctrl(adev, true);
349 
350 	return clock;
351 }
352 
353 static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
354 {
355 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
356 	amdgpu_ucode_release(&adev->gfx.me_fw);
357 	amdgpu_ucode_release(&adev->gfx.ce_fw);
358 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
359 	amdgpu_ucode_release(&adev->gfx.mec_fw);
360 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
361 
362 	kfree(adev->gfx.rlc.register_list_format);
363 }
364 
365 static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
366 					  const char *chip_name)
367 {
368 	char fw_name[30];
369 	int err;
370 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
371 	uint16_t version_major;
372 	uint16_t version_minor;
373 
374 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
375 
376 	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
377 	if (err)
378 		goto out;
379 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
380 
381 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
382 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
383 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
384 out:
385 	if (err)
386 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
387 
388 	return err;
389 }
390 
391 static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
392 {
393 	return true;
394 }
395 
396 static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
397 {
398 	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
399 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
400 }
401 
402 static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
403 					  const char *chip_name)
404 {
405 	char fw_name[30];
406 	int err;
407 
408 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
409 
410 	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
411 	if (err)
412 		goto out;
413 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
414 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
415 
416 	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
417 	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
418 
419 	gfx_v9_4_3_check_if_need_gfxoff(adev);
420 
421 out:
422 	if (err)
423 		amdgpu_ucode_release(&adev->gfx.mec_fw);
424 	return err;
425 }
426 
427 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
428 {
429 	const char *chip_name;
430 	int r;
431 
432 	chip_name = "gc_9_4_3";
433 
434 	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
435 	if (r)
436 		return r;
437 
438 	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
439 	if (r)
440 		return r;
441 
442 	return r;
443 }
444 
445 static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
446 {
447 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
448 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
449 }
450 
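/*
 * The EOP (end-of-pipe) backing store is sized as num_compute_rings *
 * num_xcc * GFX9_MEC_HPD_SIZE bytes. Illustrative math only, since ring
 * and XCC counts vary by configuration: 8 rings per XCC across 4 XCCs
 * gives 8 * 4 * 4096 = 128 KiB, later carved into per-ring slices by
 * gfx_v9_4_3_compute_ring_init().
 */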
451 static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
452 {
453 	int r, i, num_xcc;
454 	u32 *hpd;
455 	const __le32 *fw_data;
456 	unsigned fw_size;
457 	u32 *fw;
458 	size_t mec_hpd_size;
459 
460 	const struct gfx_firmware_header_v1_0 *mec_hdr;
461 
462 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
463 	for (i = 0; i < num_xcc; i++)
464 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
465 			AMDGPU_MAX_COMPUTE_QUEUES);
466 
467 	/* take ownership of the relevant compute queues */
468 	amdgpu_gfx_compute_queue_acquire(adev);
469 	mec_hpd_size =
470 		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
471 	if (mec_hpd_size) {
472 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
473 					      AMDGPU_GEM_DOMAIN_VRAM |
474 					      AMDGPU_GEM_DOMAIN_GTT,
475 					      &adev->gfx.mec.hpd_eop_obj,
476 					      &adev->gfx.mec.hpd_eop_gpu_addr,
477 					      (void **)&hpd);
478 		if (r) {
479 			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
480 			gfx_v9_4_3_mec_fini(adev);
481 			return r;
482 		}
483 
484 		if (amdgpu_emu_mode == 1) {
485 			for (i = 0; i < mec_hpd_size / 4; i++) {
486 				memset((void *)(hpd + i), 0, 4);
487 				if (i % 50 == 0)
488 					msleep(1);
489 			}
490 		} else {
491 			memset(hpd, 0, mec_hpd_size);
492 		}
493 
494 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
495 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
496 	}
497 
498 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
499 
500 	fw_data = (const __le32 *)
501 		(adev->gfx.mec_fw->data +
502 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
503 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
504 
505 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
506 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
507 				      &adev->gfx.mec.mec_fw_obj,
508 				      &adev->gfx.mec.mec_fw_gpu_addr,
509 				      (void **)&fw);
510 	if (r) {
511 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
512 		gfx_v9_4_3_mec_fini(adev);
513 		return r;
514 	}
515 
516 	memcpy(fw, fw_data, fw_size);
517 
518 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
519 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
520 
521 	return 0;
522 }
523 
524 static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
525 					u32 sh_num, u32 instance, int xcc_id)
526 {
527 	u32 data;
528 
529 	if (instance == 0xffffffff)
530 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
531 				     INSTANCE_BROADCAST_WRITES, 1);
532 	else
533 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
534 				     INSTANCE_INDEX, instance);
535 
536 	if (se_num == 0xffffffff)
537 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
538 				     SE_BROADCAST_WRITES, 1);
539 	else
540 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
541 
542 	if (sh_num == 0xffffffff)
543 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
544 				     SH_BROADCAST_WRITES, 1);
545 	else
546 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
547 
548 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
549 }
550 
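/*
 * Wave state is read through the SQ indirect register pair: SQ_IND_INDEX
 * selects a wave/SIMD and a register address, and SQ_IND_DATA returns the
 * value. wave_read_regs() additionally sets AUTO_INCR so consecutive GPR
 * reads stream out of SQ_IND_DATA without reprogramming the index.
 */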
551 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
552 {
553 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
554 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
555 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
556 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
557 		(SQ_IND_INDEX__FORCE_READ_MASK));
558 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
559 }
560 
561 static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
562 			   uint32_t wave, uint32_t thread,
563 			   uint32_t regno, uint32_t num, uint32_t *out)
564 {
565 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
566 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
567 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
568 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
569 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
570 		(SQ_IND_INDEX__FORCE_READ_MASK) |
571 		(SQ_IND_INDEX__AUTO_INCR_MASK));
572 	while (num--)
573 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
574 }
575 
576 static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
577 				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
578 				      uint32_t *dst, int *no_fields)
579 {
580 	/* type 1 wave data */
581 	dst[(*no_fields)++] = 1;
582 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
583 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
584 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
585 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
586 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
587 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
588 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
589 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
590 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
591 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
592 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
593 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
594 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
595 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
596 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
597 }
598 
599 static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
600 				       uint32_t wave, uint32_t start,
601 				       uint32_t size, uint32_t *dst)
602 {
603 	wave_read_regs(adev, xcc_id, simd, wave, 0,
604 		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
605 }
606 
607 static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
608 				       uint32_t wave, uint32_t thread,
609 				       uint32_t start, uint32_t size,
610 				       uint32_t *dst)
611 {
612 	wave_read_regs(adev, xcc_id, simd, wave, thread,
613 		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
614 }
615 
616 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
617 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
618 {
619 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
620 }
622 
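/*
 * The partition count handed to the PSP is the total XCC count divided by
 * XCCs per partition. Illustrative example (actual mode names and counts
 * are product-specific): 8 XCCs with num_xccs_per_xcp = 2 asks the PSP
 * for 8 / 2 = 4 spatial partitions.
 */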
623 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
624 						int num_xccs_per_xcp)
625 {
626 	int ret;
627 
628 	ret = psp_spatial_partition(&adev->psp, NUM_XCC(adev->gfx.xcc_mask) /
629 							num_xccs_per_xcp);
630 	if (ret)
631 		return ret;
632 
633 	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
634 
635 	return ret;
636 }
637 
638 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
639 {
640 	int xcc;
641 
642 	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
643 	if (!xcc) {
644 		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
645 		return -EINVAL;
646 	}
647 
648 	return xcc - 1;
649 }
650 
651 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
652 	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
653 	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
654 	.read_wave_data = &gfx_v9_4_3_read_wave_data,
655 	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
656 	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
657 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
658 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
659 	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
660 };
661 
662 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
663 {
664 	u32 gb_addr_config;
665 
666 	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
667 	adev->gfx.ras = &gfx_v9_4_3_ras;
668 
669 	switch (adev->ip_versions[GC_HWIP][0]) {
670 	case IP_VERSION(9, 4, 3):
671 		adev->gfx.config.max_hw_contexts = 8;
672 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
673 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
674 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
675 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
676 		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
677 		break;
678 	default:
679 		BUG();
680 		break;
681 	}
682 
683 	adev->gfx.config.gb_addr_config = gb_addr_config;
684 
685 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
686 			REG_GET_FIELD(
687 					adev->gfx.config.gb_addr_config,
688 					GB_ADDR_CONFIG,
689 					NUM_PIPES);
690 
691 	adev->gfx.config.max_tile_pipes =
692 		adev->gfx.config.gb_addr_config_fields.num_pipes;
693 
694 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
695 			REG_GET_FIELD(
696 					adev->gfx.config.gb_addr_config,
697 					GB_ADDR_CONFIG,
698 					NUM_BANKS);
699 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
700 			REG_GET_FIELD(
701 					adev->gfx.config.gb_addr_config,
702 					GB_ADDR_CONFIG,
703 					MAX_COMPRESSED_FRAGS);
704 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
705 			REG_GET_FIELD(
706 					adev->gfx.config.gb_addr_config,
707 					GB_ADDR_CONFIG,
708 					NUM_RB_PER_SE);
709 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
710 			REG_GET_FIELD(
711 					adev->gfx.config.gb_addr_config,
712 					GB_ADDR_CONFIG,
713 					NUM_SHADER_ENGINES);
714 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
715 			REG_GET_FIELD(
716 					adev->gfx.config.gb_addr_config,
717 					GB_ADDR_CONFIG,
718 					PIPE_INTERLEAVE_SIZE));
719 
720 	return 0;
721 }
722 
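/*
 * Doorbell layout sketch for the init below: each XCC owns a contiguous
 * doorbell window, so a ring's 64-bit doorbell slot is
 * mec_ring0 + xcc_id * xcc_doorbell_range + ring_id, and the << 1
 * converts that qword-granular slot into the dword-granular index used
 * elsewhere (the usual SOC15 doorbell convention). With assumed values
 * mec_ring0 = 0x10 and xcc_doorbell_range = 0x20, xcc_id 1 / ring_id 2
 * lands on slot 0x32, i.e. doorbell_index 0x64.
 */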
723 static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
724 				        int xcc_id, int mec, int pipe, int queue)
725 {
726 	unsigned irq_type;
727 	struct amdgpu_ring *ring;
728 	unsigned int hw_prio;
729 	uint32_t xcc_doorbell_start;
730 
731 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
732 				       ring_id];
733 
734 	/* mec0 is me1 */
735 	ring->xcc_id = xcc_id;
736 	ring->me = mec + 1;
737 	ring->pipe = pipe;
738 	ring->queue = queue;
739 
740 	ring->ring_obj = NULL;
741 	ring->use_doorbell = true;
742 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
743 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
744 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
745 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
746 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
747 				     GFX9_MEC_HPD_SIZE;
748 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
749 	sprintf(ring->name, "comp_%d.%d.%d.%d",
750 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
751 
752 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
753 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
754 		+ ring->pipe;
755 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
756 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
757 	/* type-2 packets are deprecated on MEC, use type-3 instead */
758 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
759 				hw_prio, NULL);
760 }
761 
762 static int gfx_v9_4_3_sw_init(void *handle)
763 {
764 	int i, j, k, r, ring_id, xcc_id, num_xcc;
765 	struct amdgpu_kiq *kiq;
766 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
767 
768 	adev->gfx.mec.num_mec = 2;
769 	adev->gfx.mec.num_pipe_per_mec = 4;
770 	adev->gfx.mec.num_queue_per_pipe = 8;
771 
772 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
773 
774 	/* EOP Event */
775 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
776 	if (r)
777 		return r;
778 
779 	/* Privileged reg */
780 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
781 			      &adev->gfx.priv_reg_irq);
782 	if (r)
783 		return r;
784 
785 	/* Privileged inst */
786 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
787 			      &adev->gfx.priv_inst_irq);
788 	if (r)
789 		return r;
790 
791 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
792 
793 	r = adev->gfx.rlc.funcs->init(adev);
794 	if (r) {
795 		DRM_ERROR("Failed to init rlc BOs!\n");
796 		return r;
797 	}
798 
799 	r = gfx_v9_4_3_mec_init(adev);
800 	if (r) {
801 		DRM_ERROR("Failed to init MEC BOs!\n");
802 		return r;
803 	}
804 
805 	/* set up the compute queues - allocate horizontally across pipes */
806 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
807 		ring_id = 0;
808 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
809 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
810 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
811 				     k++) {
812 					if (!amdgpu_gfx_is_mec_queue_enabled(
813 							adev, xcc_id, i, k, j))
814 						continue;
815 
816 					r = gfx_v9_4_3_compute_ring_init(adev,
817 								       ring_id,
818 								       xcc_id,
819 								       i, k, j);
820 					if (r)
821 						return r;
822 
823 					ring_id++;
824 				}
825 			}
826 		}
827 
828 		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
829 		if (r) {
830 			DRM_ERROR("Failed to init KIQ BOs!\n");
831 			return r;
832 		}
833 
834 		kiq = &adev->gfx.kiq[xcc_id];
835 		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
836 		if (r)
837 			return r;
838 
839 		/* create MQD for all compute queues as well as KIQ for SRIOV case */
840 		r = amdgpu_gfx_mqd_sw_init(adev,
841 				sizeof(struct v9_mqd_allocation), xcc_id);
842 		if (r)
843 			return r;
844 	}
845 
846 	r = gfx_v9_4_3_gpu_early_init(adev);
847 	if (r)
848 		return r;
849 
850 	r = amdgpu_gfx_sysfs_init(adev);
851 	if (r)
852 		return r;
853 
854 	return amdgpu_gfx_ras_sw_init(adev);
855 }
856 
857 static int gfx_v9_4_3_sw_fini(void *handle)
858 {
859 	int i, num_xcc;
860 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
861 
862 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
863 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
864 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
865 
866 	for (i = 0; i < num_xcc; i++) {
867 		amdgpu_gfx_mqd_sw_fini(adev, i);
868 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
869 		amdgpu_gfx_kiq_fini(adev, i);
870 	}
871 
872 	gfx_v9_4_3_mec_fini(adev);
873 	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
874 	gfx_v9_4_3_free_microcode(adev);
875 	amdgpu_gfx_sysfs_fini(adev);
876 
877 	return 0;
878 }
879 
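/*
 * SH_MEM_BASES packs the private and shared aperture selectors into its
 * two 16-bit halves, each becoming the top 16 bits of a 64-bit address:
 * DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16) places both
 * apertures at 0x6000 << 48 = 0x6000'0000'0000'0000, matching the map in
 * the comment below.
 */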
880 #define DEFAULT_SH_MEM_BASES	(0x6000)
881 static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
882 					     int xcc_id)
883 {
884 	int i;
885 	uint32_t sh_mem_config;
886 	uint32_t sh_mem_bases;
887 
888 	/*
889 	 * Configure apertures:
890 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
891 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
892 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
893 	 */
894 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
895 
896 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
897 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
898 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
899 
900 	mutex_lock(&adev->srbm_mutex);
901 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
902 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
903 		/* CP and shaders */
904 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
905 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
906 	}
907 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
908 	mutex_unlock(&adev->srbm_mutex);
909 
910 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
911 	 * access. These should be enabled by FW for target VMIDs. */
912 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
913 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
914 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
915 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
916 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
917 	}
918 }
919 
920 static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
921 {
922 	int vmid;
923 
924 	/*
925 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
926 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
927 	 * the driver can enable them for graphics. VMID0 should maintain
928 	 * access so that HWS firmware can save/restore entries.
929 	 */
930 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
931 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
932 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
933 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
934 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
935 	}
936 }
937 
938 static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
939 					  int xcc_id)
940 {
941 	u32 tmp;
942 	int i;
943 
944 	/* XXX SH_MEM regs */
945 	/* where to put LDS, scratch, GPUVM in FSA64 space */
946 	mutex_lock(&adev->srbm_mutex);
947 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
948 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
949 		/* CP and shaders */
950 		if (i == 0) {
951 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
952 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
953 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
954 					    !!adev->gmc.noretry);
955 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
956 					 regSH_MEM_CONFIG, tmp);
957 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
958 					 regSH_MEM_BASES, 0);
959 		} else {
960 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
961 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
962 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
963 					    !!adev->gmc.noretry);
964 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
965 					 regSH_MEM_CONFIG, tmp);
966 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
967 					    (adev->gmc.private_aperture_start >>
968 					     48));
969 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
970 					    (adev->gmc.shared_aperture_start >>
971 					     48));
972 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
973 					 regSH_MEM_BASES, tmp);
974 		}
975 	}
976 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
977 
978 	mutex_unlock(&adev->srbm_mutex);
979 
980 	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
981 	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
982 }
983 
984 static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
985 {
986 	int i, num_xcc;
987 
988 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
989 
990 	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
991 	adev->gfx.config.db_debug2 =
992 		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
993 
994 	for (i = 0; i < num_xcc; i++)
995 		gfx_v9_4_3_xcc_constants_init(adev, i);
996 }
997 
998 static void
999 gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
1000 					   int xcc_id)
1001 {
1002 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
1003 }
1004 
1005 static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
1006 {
1007 	/*
1008 	 * Rlc save restore list is workable since v2_1.
1009 	 * And it's needed by gfxoff feature.
1010 	 */
1011 	if (adev->gfx.rlc.is_rlc_v2_1)
1012 		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
1013 }
1014 
1015 static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
1016 {
1017 	uint32_t data;
1018 
1019 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
1020 	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
1021 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
1022 }
1023 
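/*
 * CP_HYP_XCP_CTL tells each XCC its position within its partition:
 * VIRTUAL_XCC_ID = xcc_id % num_xcc_per_xcp, plus NUM_XCC_IN_XCP. Worked
 * example with illustrative counts: 4 XCCs, 2 per XCP, xcc_id 3 yields
 * VIRTUAL_XCC_ID 1 and NUM_XCC_IN_XCP 2. The 1-XCC case writes the
 * literal 0x8 instead (see the first case below).
 */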
1024 static void gfx_v9_4_3_xcc_program_xcc_id(struct amdgpu_device *adev,
1025 					  int xcc_id)
1026 {
1027 	uint32_t tmp = 0;
1028 	int num_xcc;
1029 
1030 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1031 	switch (num_xcc) {
1032 	/* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */
1033 	case 1:
1034 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, 0x8);
1035 		break;
1036 	case 2:
1037 	case 4:
1038 	case 6:
1039 	case 8:
1040 		tmp = (xcc_id % adev->gfx.num_xcc_per_xcp) << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, VIRTUAL_XCC_ID);
1041 		tmp = tmp | (adev->gfx.num_xcc_per_xcp << REG_FIELD_SHIFT(CP_HYP_XCP_CTL, NUM_XCC_IN_XCP));
1042 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HYP_XCP_CTL, tmp);
1043 
1044 		break;
1045 	default:
1046 		break;
1047 	}
1048 }
1049 
1050 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
1051 {
1052 	uint32_t rlc_setting;
1053 
1054 	/* if RLC is not enabled, do nothing */
1055 	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
1056 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
1057 		return false;
1058 
1059 	return true;
1060 }
1061 
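/*
 * RLC safe-mode handshake: the driver posts CMD plus a MESSAGE code in
 * RLC_SAFE_MODE, then polls until the RLC firmware acknowledges by
 * clearing the CMD bit. Exit (below) posts CMD with no message and does
 * not wait.
 */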
1062 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1063 {
1064 	uint32_t data;
1065 	unsigned i;
1066 
1067 	data = RLC_SAFE_MODE__CMD_MASK;
1068 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1069 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1070 
1071 	/* wait for RLC_SAFE_MODE */
1072 	for (i = 0; i < adev->usec_timeout; i++) {
1073 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1074 			break;
1075 		udelay(1);
1076 	}
1077 }
1078 
1079 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1080 					   int xcc_id)
1081 {
1082 	uint32_t data;
1083 
1084 	data = RLC_SAFE_MODE__CMD_MASK;
1085 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1086 }
1087 
1088 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1089 {
1090 	/* init spm vmid with 0xf */
1091 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1092 		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1093 
1094 	return 0;
1095 }
1096 
1097 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1098 					       int xcc_id)
1099 {
1100 	u32 i, j, k;
1101 	u32 mask;
1102 
1103 	mutex_lock(&adev->grbm_idx_mutex);
1104 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1105 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1106 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1107 						    xcc_id);
1108 			for (k = 0; k < adev->usec_timeout; k++) {
1109 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1110 					break;
1111 				udelay(1);
1112 			}
1113 			if (k == adev->usec_timeout) {
1114 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1115 							    0xffffffff,
1116 							    0xffffffff, xcc_id);
1117 				mutex_unlock(&adev->grbm_idx_mutex);
1118 				DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
1119 					 i, j);
1120 				return;
1121 			}
1122 		}
1123 	}
1124 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1125 				    xcc_id);
1126 	mutex_unlock(&adev->grbm_idx_mutex);
1127 
1128 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1129 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1130 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1131 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1132 	for (k = 0; k < adev->usec_timeout; k++) {
1133 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1134 			break;
1135 		udelay(1);
1136 	}
1137 }
1138 
1139 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1140 						     bool enable, int xcc_id)
1141 {
1142 	u32 tmp;
1143 
1144 	/* These interrupts should be enabled to drive DS clock */
1145 
1146 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1147 
1148 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1149 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1150 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1151 
1152 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1153 }
1154 
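/*
 * Per-XCC RLC stop sequence: clear RLC_ENABLE_F32 to halt the RLC core,
 * disable the GUI-idle interrupts, then wait for the RLC serdes masters
 * to report idle before callers touch RLC state.
 */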
1155 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1156 {
1157 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1158 			      RLC_ENABLE_F32, 0);
1159 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1160 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1161 }
1162 
1163 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1164 {
1165 	int i, num_xcc;
1166 
1167 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1168 	for (i = 0; i < num_xcc; i++)
1169 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1170 }
1171 
1172 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1173 {
1174 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1175 			      SOFT_RESET_RLC, 1);
1176 	udelay(50);
1177 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1178 			      SOFT_RESET_RLC, 0);
1179 	udelay(50);
1180 }
1181 
1182 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1183 {
1184 	int i, num_xcc;
1185 
1186 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1187 	for (i = 0; i < num_xcc; i++)
1188 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1189 }
1190 
1191 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1192 {
1193 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1194 			      RLC_ENABLE_F32, 1);
1195 	udelay(50);
1196 
1197 	/* APUs (e.g. carrizo) enable the CP interrupt only after CP init */
1198 	if (!(adev->flags & AMD_IS_APU)) {
1199 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1200 		udelay(50);
1201 	}
1202 }
1203 
1204 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1205 {
1206 #ifdef AMDGPU_RLC_DEBUG_RETRY
1207 	u32 rlc_ucode_ver;
1208 #endif
1209 	int i, num_xcc;
1210 
1211 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1212 	for (i = 0; i < num_xcc; i++) {
1213 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1214 #ifdef AMDGPU_RLC_DEBUG_RETRY
1215 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1216 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1217 		if (rlc_ucode_ver == 0x108) {
1218 			dev_info(adev->dev,
1219 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
1220 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1221 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1222 			 * default is 0x9C4 to create a 100us interval */
1223 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1224 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1225 			 * to disable the page fault retry interrupts, default is
1226 			 * 0x100 (256) */
1227 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1228 		}
1229 #endif
1230 	}
1231 }
1232 
1233 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1234 					     int xcc_id)
1235 {
1236 	const struct rlc_firmware_header_v2_0 *hdr;
1237 	const __le32 *fw_data;
1238 	unsigned i, fw_size;
1239 
1240 	if (!adev->gfx.rlc_fw)
1241 		return -EINVAL;
1242 
1243 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1244 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1245 
1246 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1247 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1248 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1249 
1250 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1251 			RLCG_UCODE_LOADING_START_ADDRESS);
1252 	for (i = 0; i < fw_size; i++) {
1253 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1254 			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1255 			msleep(1);
1256 		}
1257 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1258 	}
1259 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1260 
1261 	return 0;
1262 }
1263 
1264 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1265 {
1266 	int r;
1267 
1268 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1269 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1270 		/* legacy rlc firmware loading */
1271 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1272 		if (r)
1273 			return r;
1274 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1275 	}
1276 
1277 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1278 	/* disable CG */
1279 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1280 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1281 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1282 
1283 	return 0;
1284 }
1285 
1286 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1287 {
1288 	int r, i, num_xcc;
1289 
1290 	if (amdgpu_sriov_vf(adev))
1291 		return 0;
1292 
1293 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1294 	for (i = 0; i < num_xcc; i++) {
1295 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1296 		if (r)
1297 			return r;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
1304 				       unsigned vmid)
1305 {
1306 	u32 reg, data;
1307 
1308 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1309 	if (amdgpu_sriov_is_pp_one_vf(adev))
1310 		data = RREG32_NO_KIQ(reg);
1311 	else
1312 		data = RREG32(reg);
1313 
1314 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
1315 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1316 
1317 	if (amdgpu_sriov_is_pp_one_vf(adev))
1318 		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1319 	else
1320 		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1321 }
1322 
1323 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1324 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1325 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1326 };
1327 
1328 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1329 					uint32_t offset,
1330 					struct soc15_reg_rlcg *entries, int arr_size)
1331 {
1332 	int i, inst;
1333 	uint32_t reg;
1334 
1335 	if (!entries)
1336 		return false;
1337 
1338 	for (i = 0; i < arr_size; i++) {
1339 		const struct soc15_reg_rlcg *entry;
1340 
1341 		entry = &entries[i];
1342 		inst = adev->ip_map.logical_to_dev_inst ?
1343 			       adev->ip_map.logical_to_dev_inst(
1344 				       adev, entry->hwip, entry->instance) :
1345 			       entry->instance;
1346 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1347 		      entry->reg;
1348 		if (offset == reg)
1349 			return true;
1350 	}
1351 
1352 	return false;
1353 }
1354 
1355 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1356 {
1357 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1358 					(void *)rlcg_access_gc_9_4_3,
1359 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1360 }
1361 
1362 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1363 					     bool enable, int xcc_id)
1364 {
1365 	if (enable) {
1366 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1367 	} else {
1368 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1369 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1370 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1371 	}
1372 	udelay(50);
1373 }
1374 
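/*
 * Direct (non-PSP) MEC load: the bulk of the firmware is fetched by the
 * CP through its instruction cache, pointed at the firmware BO via
 * CP_CPC_IC_BASE_LO/HI; only the jump table (jt_offset/jt_size dwords)
 * is pushed by hand through the UCODE_ADDR/DATA pair below.
 */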
1375 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1376 						    int xcc_id)
1377 {
1378 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1379 	const __le32 *fw_data;
1380 	unsigned i;
1381 	u32 tmp;
1382 	u32 mec_ucode_addr_offset;
1383 	u32 mec_ucode_data_offset;
1384 
1385 	if (!adev->gfx.mec_fw)
1386 		return -EINVAL;
1387 
1388 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1389 
1390 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1391 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1392 
1393 	fw_data = (const __le32 *)
1394 		(adev->gfx.mec_fw->data +
1395 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1396 	tmp = 0;
1397 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1398 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1399 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1400 
1401 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1402 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1403 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1404 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1405 
1406 	mec_ucode_addr_offset =
1407 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1408 	mec_ucode_data_offset =
1409 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1410 
1411 	/* MEC1 */
1412 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1413 	for (i = 0; i < mec_hdr->jt_size; i++)
1414 		WREG32(mec_ucode_data_offset,
1415 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1416 
1417 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1418 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1419 
1420 	return 0;
1421 }
1422 
1423 /* KIQ functions */
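/*
 * regRLC_CP_SCHEDULERS' low byte identifies the KIQ to the RLC as
 * (me << 5) | (pipe << 3) | queue; the follow-up write with bit 0x80 set
 * appears to latch/activate the entry (a two-step sequence inferred from
 * the programming order, not from documentation).
 */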
1424 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1425 {
1426 	uint32_t tmp;
1427 	struct amdgpu_device *adev = ring->adev;
1428 
1429 	/* tell RLC which is KIQ queue */
1430 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1431 	tmp &= 0xffffff00;
1432 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1433 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1434 	tmp |= 0x80;
1435 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
1436 }
1437 
1438 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1439 {
1440 	struct amdgpu_device *adev = ring->adev;
1441 
1442 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1443 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1444 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1445 			mqd->cp_hqd_queue_priority =
1446 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1447 		}
1448 	}
1449 }
1450 
1451 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1452 {
1453 	struct amdgpu_device *adev = ring->adev;
1454 	struct v9_mqd *mqd = ring->mqd_ptr;
1455 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1456 	uint32_t tmp;
1457 
1458 	mqd->header = 0xC0310800;
1459 	mqd->compute_pipelinestat_enable = 0x00000001;
1460 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1461 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1462 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1463 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1464 	mqd->compute_misc_reserved = 0x00000003;
1465 
1466 	mqd->dynamic_cu_mask_addr_lo =
1467 		lower_32_bits(ring->mqd_gpu_addr
1468 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1469 	mqd->dynamic_cu_mask_addr_hi =
1470 		upper_32_bits(ring->mqd_gpu_addr
1471 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1472 
1473 	eop_base_addr = ring->eop_gpu_addr >> 8;
1474 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1475 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1476 
1477 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1478 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1479 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1480 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
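	/*
	 * Worked example: GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords,
	 * so the field is order_base_2(1024) - 1 = 9 and the CP decodes
	 * 2^(9 + 1) = 1024 dwords, matching the per-ring EOP slice.
	 */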
1481 
1482 	mqd->cp_hqd_eop_control = tmp;
1483 
1484 	/* enable doorbell? */
1485 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1486 
1487 	if (ring->use_doorbell) {
1488 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1489 				    DOORBELL_OFFSET, ring->doorbell_index);
1490 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1491 				    DOORBELL_EN, 1);
1492 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1493 				    DOORBELL_SOURCE, 0);
1494 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1495 				    DOORBELL_HIT, 0);
1496 	} else {
1497 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1498 					 DOORBELL_EN, 0);
1499 	}
1500 
1501 	mqd->cp_hqd_pq_doorbell_control = tmp;
1502 
1503 	/* disable the queue if it's active */
1504 	ring->wptr = 0;
1505 	mqd->cp_hqd_dequeue_request = 0;
1506 	mqd->cp_hqd_pq_rptr = 0;
1507 	mqd->cp_hqd_pq_wptr_lo = 0;
1508 	mqd->cp_hqd_pq_wptr_hi = 0;
1509 
1510 	/* set the pointer to the MQD */
1511 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1512 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1513 
1514 	/* set MQD vmid to 0 */
1515 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1516 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1517 	mqd->cp_mqd_control = tmp;
1518 
1519 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1520 	hqd_gpu_addr = ring->gpu_addr >> 8;
1521 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1522 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1523 
1524 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1525 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1526 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1527 			    (order_base_2(ring->ring_size / 4) - 1));
1528 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1529 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1530 #ifdef __BIG_ENDIAN
1531 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1532 #endif
1533 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1534 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1535 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1536 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1537 	mqd->cp_hqd_pq_control = tmp;
1538 
1539 	/* set the wb address whether it's enabled or not */
1540 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1541 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1542 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1543 		upper_32_bits(wb_gpu_addr) & 0xffff;
1544 
1545 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1546 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1547 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1548 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1549 
1550 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1551 	ring->wptr = 0;
1552 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1553 
1554 	/* set the vmid for the queue */
1555 	mqd->cp_hqd_vmid = 0;
1556 
1557 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1558 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1559 	mqd->cp_hqd_persistent_state = tmp;
1560 
1561 	/* set MIN_IB_AVAIL_SIZE */
1562 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1563 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1564 	mqd->cp_hqd_ib_control = tmp;
1565 
1566 	/* set static priority for a queue/ring */
1567 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1568 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1569 
1570 	/* the map_queues packet doesn't need to activate the queue,
1571 	 * so only the KIQ needs to set this field.
1572 	 */
1573 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1574 		mqd->cp_hqd_active = 1;
1575 
1576 	return 0;
1577 }
1578 
1579 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1580 					    int xcc_id)
1581 {
1582 	struct amdgpu_device *adev = ring->adev;
1583 	struct v9_mqd *mqd = ring->mqd_ptr;
1584 	int j;
1585 
1586 	/* disable wptr polling */
1587 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1588 
1589 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1590 	       mqd->cp_hqd_eop_base_addr_lo);
1591 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1592 	       mqd->cp_hqd_eop_base_addr_hi);
1593 
1594 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1595 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1596 	       mqd->cp_hqd_eop_control);
1597 
1598 	/* enable doorbell? */
1599 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1600 	       mqd->cp_hqd_pq_doorbell_control);
1601 
1602 	/* disable the queue if it's active */
1603 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1604 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1605 		for (j = 0; j < adev->usec_timeout; j++) {
1606 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1607 				break;
1608 			udelay(1);
1609 		}
1610 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1611 		       mqd->cp_hqd_dequeue_request);
1612 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1613 		       mqd->cp_hqd_pq_rptr);
1614 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1615 		       mqd->cp_hqd_pq_wptr_lo);
1616 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1617 		       mqd->cp_hqd_pq_wptr_hi);
1618 	}
1619 
1620 	/* set the pointer to the MQD */
1621 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1622 	       mqd->cp_mqd_base_addr_lo);
1623 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1624 	       mqd->cp_mqd_base_addr_hi);
1625 
1626 	/* set MQD vmid to 0 */
1627 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1628 	       mqd->cp_mqd_control);
1629 
1630 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1631 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1632 	       mqd->cp_hqd_pq_base_lo);
1633 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1634 	       mqd->cp_hqd_pq_base_hi);
1635 
1636 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1637 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1638 	       mqd->cp_hqd_pq_control);
1639 
1640 	/* set the wb address whether it's enabled or not */
1641 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1642 				mqd->cp_hqd_pq_rptr_report_addr_lo);
1643 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1644 				mqd->cp_hqd_pq_rptr_report_addr_hi);
1645 
1646 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1647 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1648 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
1649 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1650 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
1651 
1652 	/* enable the doorbell if requested */
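	/* The doorbell_index values appear to be in 64-bit doorbell units:
	 * "* 2" converts them to 32-bit dword units and "<< 2" to a byte
	 * offset, with each XCC stepped into its own xcc_doorbell_range
	 * window.
	 */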
1653 	if (ring->use_doorbell) {
1654 		WREG32_SOC15(
1655 			GC, GET_INST(GC, xcc_id),
1656 			regCP_MEC_DOORBELL_RANGE_LOWER,
1657 			((adev->doorbell_index.kiq +
1658 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1659 			 2) << 2);
1660 		WREG32_SOC15(
1661 			GC, GET_INST(GC, xcc_id),
1662 			regCP_MEC_DOORBELL_RANGE_UPPER,
1663 			((adev->doorbell_index.userqueue_end +
1664 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1665 			 2) << 2);
1666 	}
1667 
1668 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1669 	       mqd->cp_hqd_pq_doorbell_control);
1670 
1671 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1672 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1673 	       mqd->cp_hqd_pq_wptr_lo);
1674 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1675 	       mqd->cp_hqd_pq_wptr_hi);
1676 
1677 	/* set the vmid for the queue */
1678 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
1679 
1680 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
1681 	       mqd->cp_hqd_persistent_state);
1682 
1683 	/* activate the queue */
1684 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
1685 	       mqd->cp_hqd_active);
1686 
1687 	if (ring->use_doorbell)
1688 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
1689 
1690 	return 0;
1691 }
1692 
1693 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
1694 					    int xcc_id)
1695 {
1696 	struct amdgpu_device *adev = ring->adev;
1697 	int j;
1698 
1699 	/* disable the queue if it's active */
1700 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1701 
1702 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1703 
1704 		for (j = 0; j < adev->usec_timeout; j++) {
1705 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1706 				break;
1707 			udelay(1);
1708 		}
1709 
1710 		if (j == adev->usec_timeout) {
1711 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
1712 
1713 			/* Manual disable if dequeue request times out */
1714 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
1715 		}
1716 
1717 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1718 		      0);
1719 	}
1720 
1721 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
1722 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
1723 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
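	/* 0x40000000 below looks like the DOORBELL_HIT bit (bit 30) of
	 * CP_HQD_PQ_DOORBELL_CONTROL; setting it and then clearing the whole
	 * register drops any latched doorbell before the queue is torn down.
	 */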
1724 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
1725 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
1726 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
1727 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
1728 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
1729 
1730 	return 0;
1731 }
1732 
1733 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1734 {
1735 	struct amdgpu_device *adev = ring->adev;
1736 	struct v9_mqd *mqd = ring->mqd_ptr;
1737 	struct v9_mqd *tmp_mqd;
1738 
1739 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
1740 
1741 	/* The GPU could be in a bad state during probe; the driver triggers
1742 	 * the reset after loading the SMU, and in that case the MQD has not
1743 	 * been initialized, so the driver needs to re-init it.
1744 	 * Check mqd->cp_hqd_pq_control, since this value should not be 0.
1745 	 */
1746 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
1747 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
1748 		/* for the GPU_RESET case, reset the MQD to a clean state */
1749 		if (adev->gfx.kiq[xcc_id].mqd_backup)
1750 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
1751 
1752 		/* reset ring buffer */
1753 		ring->wptr = 0;
1754 		amdgpu_ring_clear_ring(ring);
1755 		mutex_lock(&adev->srbm_mutex);
1756 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1757 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1758 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1759 		mutex_unlock(&adev->srbm_mutex);
1760 	} else {
1761 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1762 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1763 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1764 		mutex_lock(&adev->srbm_mutex);
1765 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1766 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1767 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1768 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1769 		mutex_unlock(&adev->srbm_mutex);
1770 
1771 		if (adev->gfx.kiq[xcc_id].mqd_backup)
1772 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
1773 	}
1774 
1775 	return 0;
1776 }
1777 
1778 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1779 {
1780 	struct amdgpu_device *adev = ring->adev;
1781 	struct v9_mqd *mqd = ring->mqd_ptr;
1782 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
1783 	struct v9_mqd *tmp_mqd;
1784 
1785 	/* Same as the KIQ init above: the driver needs to re-init the MQD if
1786 	 * mqd->cp_hqd_pq_control was never initialized before.
1787 	 */
1788 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
1789 
1790 	if (!tmp_mqd->cp_hqd_pq_control ||
1791 	    (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
1792 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1793 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1794 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1795 		mutex_lock(&adev->srbm_mutex);
1796 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1797 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1798 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1799 		mutex_unlock(&adev->srbm_mutex);
1800 
1801 		if (adev->gfx.mec.mqd_backup[mqd_idx])
1802 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
1803 	} else {
1804 		/* restore MQD to a clean status */
1805 		if (adev->gfx.mec.mqd_backup[mqd_idx])
1806 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
1807 		/* reset ring buffer */
1808 		ring->wptr = 0;
1809 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
1810 		amdgpu_ring_clear_ring(ring);
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
1817 {
1818 	struct amdgpu_ring *ring;
1819 	int j;
1820 
1821 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1822 		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
1823 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1824 			mutex_lock(&adev->srbm_mutex);
1825 			soc15_grbm_select(adev, ring->me,
1826 					ring->pipe,
1827 					ring->queue, 0, GET_INST(GC, xcc_id));
1828 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
1829 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1830 			mutex_unlock(&adev->srbm_mutex);
1831 		}
1832 	}
1833 
1834 	return 0;
1835 }
1836 
1837 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
1838 {
1839 	struct amdgpu_ring *ring;
1840 	int r;
1841 
1842 	ring = &adev->gfx.kiq[xcc_id].ring;
1843 
1844 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
1845 	if (unlikely(r != 0))
1846 		return r;
1847 
1848 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1849 	if (unlikely(r != 0)) {
1850 		amdgpu_bo_unreserve(ring->mqd_obj);
1851 		return r;
1852 	}
1853 
1854 	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
1855 	amdgpu_bo_kunmap(ring->mqd_obj);
1856 	ring->mqd_ptr = NULL;
1857 	amdgpu_bo_unreserve(ring->mqd_obj);
1858 	return 0;
1859 }
1860 
1861 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
1862 {
1863 	struct amdgpu_ring *ring = NULL;
1864 	int r = 0, i;
1865 
1866 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
1867 
1868 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1869 		ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
1870 
1871 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
1872 		if (unlikely(r != 0))
1873 			goto done;
1874 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1875 		if (!r) {
1876 			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
1877 			amdgpu_bo_kunmap(ring->mqd_obj);
1878 			ring->mqd_ptr = NULL;
1879 		}
1880 		amdgpu_bo_unreserve(ring->mqd_obj);
1881 		if (r)
1882 			goto done;
1883 	}
1884 
1885 	r = amdgpu_gfx_enable_kcq(adev, xcc_id);
1886 done:
1887 	return r;
1888 }
1889 
1890 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
1891 {
1892 	struct amdgpu_ring *ring;
1893 	int r, j;
1894 
1895 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1896 
1897 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1898 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
1899 
1900 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
1901 		if (r)
1902 			return r;
1903 	}
1904 
1905 	/* set the virtual and physical id based on partition_mode */
1906 	gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id);
1907 
1908 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
1909 	if (r)
1910 		return r;
1911 
1912 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
1913 	if (r)
1914 		return r;
1915 
1916 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1917 		ring = &adev->gfx.compute_ring
1918 				[j + xcc_id * adev->gfx.num_compute_rings];
1919 		r = amdgpu_ring_test_helper(ring);
1920 		if (r)
1921 			return r;
1922 	}
1923 
1924 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1925 
1926 	return 0;
1927 }
1928 
1929 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
1930 {
1931 	int r = 0, i, num_xcc;
1932 
1933 	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
1934 					    AMDGPU_XCP_FL_NONE) ==
1935 	    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
1936 		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
1937 						     amdgpu_user_partt_mode);
1938 
1939 	if (r)
1940 		return r;
1941 
1942 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1943 	for (i = 0; i < num_xcc; i++) {
1944 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
1945 		if (r)
1946 			return r;
1947 	}
1948 
1949 	return 0;
1950 }
1951 
1952 static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
1953 				     int xcc_id)
1954 {
1955 	gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
1956 }
1957 
1958 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
1959 {
1960 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
1961 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
1962 
1963 	/* Use the deinitialize sequence from CAIL when unbinding the device
1964 	 * from the driver, otherwise the KIQ hangs when binding it back.
1965 	 */
1966 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1967 		mutex_lock(&adev->srbm_mutex);
1968 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
1969 				  adev->gfx.kiq[xcc_id].ring.pipe,
1970 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
1971 				  GET_INST(GC, xcc_id));
1972 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
1973 						 xcc_id);
1974 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1975 		mutex_unlock(&adev->srbm_mutex);
1976 	}
1977 
1978 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
1979 	gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
1980 }
1981 
1982 static int gfx_v9_4_3_hw_init(void *handle)
1983 {
1984 	int r;
1985 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1986 
1987 	gfx_v9_4_3_init_golden_registers(adev);
1988 
1989 	gfx_v9_4_3_constants_init(adev);
1990 
1991 	r = adev->gfx.rlc.funcs->resume(adev);
1992 	if (r)
1993 		return r;
1994 
1995 	r = gfx_v9_4_3_cp_resume(adev);
1996 	if (r)
1997 		return r;
1998 
1999 	return r;
2000 }
2001 
2002 static int gfx_v9_4_3_hw_fini(void *handle)
2003 {
2004 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2005 	int i, num_xcc;
2006 
2007 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2008 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2009 
2010 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2011 	for (i = 0; i < num_xcc; i++)
2012 		gfx_v9_4_3_xcc_fini(adev, i);
2014 
2015 	return 0;
2016 }
2017 
2018 static int gfx_v9_4_3_suspend(void *handle)
2019 {
2020 	return gfx_v9_4_3_hw_fini(handle);
2021 }
2022 
2023 static int gfx_v9_4_3_resume(void *handle)
2024 {
2025 	return gfx_v9_4_3_hw_init(handle);
2026 }
2027 
2028 static bool gfx_v9_4_3_is_idle(void *handle)
2029 {
2030 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2031 	int i, num_xcc;
2032 
2033 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2034 	for (i = 0; i < num_xcc; i++) {
2035 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2036 					GRBM_STATUS, GUI_ACTIVE))
2037 			return false;
2038 	}
2039 	return true;
2040 }
2041 
2042 static int gfx_v9_4_3_wait_for_idle(void *handle)
2043 {
2044 	unsigned i;
2045 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2046 
2047 	for (i = 0; i < adev->usec_timeout; i++) {
2048 		if (gfx_v9_4_3_is_idle(handle))
2049 			return 0;
2050 		udelay(1);
2051 	}
2052 	return -ETIMEDOUT;
2053 }
2054 
2055 static int gfx_v9_4_3_soft_reset(void *handle)
2056 {
2057 	u32 grbm_soft_reset = 0;
2058 	u32 tmp;
2059 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2060 
2061 	/* GRBM_STATUS */
2062 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2063 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2064 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2065 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2066 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2067 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2068 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2069 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2070 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2071 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2072 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2073 	}
2074 
2075 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2076 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2077 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2078 	}
2079 
2080 	/* GRBM_STATUS2 */
2081 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2082 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2083 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2084 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2085 
2086 
2087 	if (grbm_soft_reset) {
2088 		/* stop the rlc */
2089 		adev->gfx.rlc.funcs->stop(adev);
2090 
2091 		/* Disable MEC parsing/prefetching */
2092 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2093 
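		/* Assert the selected reset bits, let them settle for ~50 us,
		 * then deassert; the read-back after each write flushes the
		 * posted register write before the delay starts.
		 */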
2095 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2096 		tmp |= grbm_soft_reset;
2097 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2098 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2099 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2100 
2101 		udelay(50);
2102 
2103 		tmp &= ~grbm_soft_reset;
2104 		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2105 		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2107 
2108 		/* Wait a little for things to settle down */
2109 		udelay(50);
2110 	}
2111 	return 0;
2112 }
2113 
2114 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2115 					  uint32_t vmid,
2116 					  uint32_t gds_base, uint32_t gds_size,
2117 					  uint32_t gws_base, uint32_t gws_size,
2118 					  uint32_t oa_base, uint32_t oa_size)
2119 {
2120 	struct amdgpu_device *adev = ring->adev;
2121 
2122 	/* GDS Base */
2123 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2124 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2125 				   gds_base);
2126 
2127 	/* GDS Size */
2128 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2129 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2130 				   gds_size);
2131 
2132 	/* GWS */
2133 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2134 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2135 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2136 
2137 	/* OA */
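	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at bit oa_base, e.g. base 4 and
	 * size 4 give 0xf0.
	 */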
2138 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2139 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2140 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2141 }
2142 
2143 static int gfx_v9_4_3_early_init(void *handle)
2144 {
2145 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2146 
2147 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2148 					  AMDGPU_MAX_COMPUTE_RINGS);
2149 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2150 	gfx_v9_4_3_set_ring_funcs(adev);
2151 	gfx_v9_4_3_set_irq_funcs(adev);
2152 	gfx_v9_4_3_set_gds_init(adev);
2153 	gfx_v9_4_3_set_rlc_funcs(adev);
2154 
2155 	return gfx_v9_4_3_init_microcode(adev);
2156 }
2157 
2158 static int gfx_v9_4_3_late_init(void *handle)
2159 {
2160 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2161 	int r;
2162 
2163 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2164 	if (r)
2165 		return r;
2166 
2167 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2168 	if (r)
2169 		return r;
2170 
2171 	return 0;
2172 }
2173 
2174 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2175 					    bool enable, int xcc_id)
2176 {
2177 	uint32_t def, data;
2178 
2179 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2180 		return;
2181 
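	/* def/data pattern used throughout this file: snapshot the register,
	 * adjust a copy, and write back only when the value actually
	 * changed, avoiding needless register traffic.
	 */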
2182 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2183 				  regRLC_CGTT_MGCG_OVERRIDE);
2184 
2185 	if (enable)
2186 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2187 	else
2188 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2189 
2190 	if (def != data)
2191 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2192 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2193 
2194 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
2195 
2196 	if (enable)
2197 		data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
2198 	else
2199 		data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
2200 
2201 	if (def != data)
2202 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
2203 }
2204 
2205 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2206 						bool enable, int xcc_id)
2207 {
2208 	uint32_t def, data;
2209 
2210 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2211 		return;
2212 
2213 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2214 				  regRLC_CGTT_MGCG_OVERRIDE);
2215 
2216 	if (enable)
2217 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2218 	else
2219 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2220 
2221 	if (def != data)
2222 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2223 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2224 }
2225 
2226 static void
2227 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2228 						bool enable, int xcc_id)
2229 {
2230 	uint32_t data, def;
2231 
2232 	/* It is disabled by HW by default */
2233 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2234 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2235 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2236 
2237 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2238 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2239 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2240 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2241 
2242 		if (def != data)
2243 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2244 
2245 		/* MGLS is a global flag to control all MGLS in GFX */
2246 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2247 			/* 2 - RLC memory Light sleep */
2248 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2249 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2250 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2251 				if (def != data)
2252 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2253 			}
2254 			/* 3 - CP memory Light sleep */
2255 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2256 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2257 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2258 				if (def != data)
2259 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2260 			}
2261 		}
2262 	} else {
2263 		/* 1 - MGCG_OVERRIDE */
2264 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2265 
2266 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2267 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2268 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2269 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2270 
2271 		if (def != data)
2272 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2273 
2274 		/* 2 - disable MGLS in RLC */
2275 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2276 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2277 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2278 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2279 		}
2280 
2281 		/* 3 - disable MGLS in CP */
2282 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2283 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2284 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2285 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2286 		}
2287 	}
2289 }
2290 
2291 static void
2292 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2293 						bool enable, int xcc_id)
2294 {
2295 	uint32_t def, data;
2296 
2297 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2298 
2299 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2300 		/* unset CGCG override */
2301 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2302 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2303 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2304 		else
2305 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2306 		/* update CGCG and CGLS override bits */
2307 		if (def != data)
2308 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2309 
2310 		/* enable the cgcg FSM (0x0000363F) */
2311 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2312 
2313 		data = (0x36
2314 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2315 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2316 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2317 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2318 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2319 		if (def != data)
2320 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2321 
2322 		/* set IDLE_POLL_COUNT(0x00900100) */
2323 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2324 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2325 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2326 		if (def != data)
2327 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2328 	} else {
2329 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2330 		/* reset CGCG/CGLS bits */
2331 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2332 		/* disable cgcg and cgls in FSM */
2333 		if (def != data)
2334 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2335 	}
2337 }
2338 
2339 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2340 						  bool enable, int xcc_id)
2341 {
2342 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2343 
2344 	if (enable) {
2345 		/* FGCG */
2346 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2347 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2348 
2349 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2350 		 * ===  MGCG + MGLS ===
2351 		 */
2352 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2353 								xcc_id);
2354 		/* ===  CGCG + CGLS === */
2355 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2356 								xcc_id);
2357 	} else {
2358 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2359 		 * ===  CGCG + CGLS ===
2360 		 */
2361 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2362 								xcc_id);
2363 		/* ===  MGCG + MGLS === */
2364 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2365 								xcc_id);
2366 
2367 		/* FGCG */
2368 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2369 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2370 	}
2371 
2372 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2373 
2374 	return 0;
2375 }
2376 
2377 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2378 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2379 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2380 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2381 	.init = gfx_v9_4_3_rlc_init,
2382 	.resume = gfx_v9_4_3_rlc_resume,
2383 	.stop = gfx_v9_4_3_rlc_stop,
2384 	.reset = gfx_v9_4_3_rlc_reset,
2385 	.start = gfx_v9_4_3_rlc_start,
2386 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2387 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2388 };
2389 
2390 static int gfx_v9_4_3_set_powergating_state(void *handle,
2391 					  enum amd_powergating_state state)
2392 {
2393 	return 0;
2394 }
2395 
2396 static int gfx_v9_4_3_set_clockgating_state(void *handle,
2397 					  enum amd_clockgating_state state)
2398 {
2399 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2400 	int i, num_xcc;
2401 
2402 	if (amdgpu_sriov_vf(adev))
2403 		return 0;
2404 
2405 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2406 	switch (adev->ip_versions[GC_HWIP][0]) {
2407 	case IP_VERSION(9, 4, 3):
2408 		for (i = 0; i < num_xcc; i++)
2409 			gfx_v9_4_3_xcc_update_gfx_clock_gating(
2410 				adev, state == AMD_CG_STATE_GATE, i);
2411 		break;
2412 	default:
2413 		break;
2414 	}
2415 	return 0;
2416 }
2417 
2418 static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2419 {
2420 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2421 	int data;
2422 
2423 	if (amdgpu_sriov_vf(adev))
2424 		*flags = 0;
2425 
2426 	/* AMD_CG_SUPPORT_GFX_MGCG */
2427 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2428 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2429 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2430 
2431 	/* AMD_CG_SUPPORT_GFX_CGCG */
2432 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2433 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2434 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2435 
2436 	/* AMD_CG_SUPPORT_GFX_CGLS */
2437 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2438 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2439 
2440 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2441 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2442 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2443 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2444 
2445 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2446 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2447 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2448 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2449 }
2450 
2451 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2452 {
2453 	struct amdgpu_device *adev = ring->adev;
2454 	u32 ref_and_mask, reg_mem_engine;
2455 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2456 
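	/* Each CP pipe owns one bit in the nbio HDP flush request/done
	 * registers: MEC1 pipes start at the cp2 bit and MEC2 pipes at the
	 * cp6 bit, hence the shift by ring->pipe below.
	 */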
2457 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2458 		switch (ring->me) {
2459 		case 1:
2460 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2461 			break;
2462 		case 2:
2463 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2464 			break;
2465 		default:
2466 			return;
2467 		}
2468 		reg_mem_engine = 0;
2469 	} else {
2470 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2471 		reg_mem_engine = 1; /* pfp */
2472 	}
2473 
2474 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2475 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2476 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2477 			      ref_and_mask, ref_and_mask, 0x20);
2478 }
2479 
2480 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2481 					  struct amdgpu_job *job,
2482 					  struct amdgpu_ib *ib,
2483 					  uint32_t flags)
2484 {
2485 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2486 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
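	/* control packs the IB length in dwords into the low bits, the VMID
	 * at bit 24, and the INDIRECT_BUFFER_VALID flag.
	 */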
2487 
2488 	/* Currently, there is a high possibility to get wave ID mismatch
2489 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2490 	 * different wave IDs than the GDS expects. This situation happens
2491 	 * randomly when at least 5 compute pipes use GDS ordered append.
2492 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2493 	 * Those are probably bugs somewhere else in the kernel driver.
2494 	 *
2495 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2496 	 * GDS to 0 for this ring (me/pipe).
2497 	 */
2498 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2499 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2500 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2501 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2502 	}
2503 
2504 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2505 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2506 	amdgpu_ring_write(ring,
2507 #ifdef __BIG_ENDIAN
2508 				(2 << 0) |
2509 #endif
2510 				lower_32_bits(ib->gpu_addr));
2511 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2512 	amdgpu_ring_write(ring, control);
2513 }
2514 
2515 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2516 				     u64 seq, unsigned flags)
2517 {
2518 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2519 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2520 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2521 
2522 	/* RELEASE_MEM - flush caches, send int */
2523 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2524 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2525 					       EOP_TC_NC_ACTION_EN) :
2526 					      (EOP_TCL1_ACTION_EN |
2527 					       EOP_TC_ACTION_EN |
2528 					       EOP_TC_WB_ACTION_EN |
2529 					       EOP_TC_MD_ACTION_EN)) |
2530 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2531 				 EVENT_INDEX(5)));
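	/* DATA_SEL 2 writes the full 64-bit fence value, 1 only the low 32
	 * bits; INT_SEL 2 raises an interrupt once the write has been
	 * confirmed (the values as conventionally used in the gfx9 code).
	 */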
2532 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2533 
2534 	/*
2535 	 * the address should be qword-aligned for a 64-bit write, and
2536 	 * dword-aligned when only sending the low 32 bits (data high is discarded)
2537 	 */
2538 	if (write64bit)
2539 		BUG_ON(addr & 0x7);
2540 	else
2541 		BUG_ON(addr & 0x3);
2542 	amdgpu_ring_write(ring, lower_32_bits(addr));
2543 	amdgpu_ring_write(ring, upper_32_bits(addr));
2544 	amdgpu_ring_write(ring, lower_32_bits(seq));
2545 	amdgpu_ring_write(ring, upper_32_bits(seq));
2546 	amdgpu_ring_write(ring, 0);
2547 }
2548 
2549 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2550 {
2551 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2552 	uint32_t seq = ring->fence_drv.sync_seq;
2553 	uint64_t addr = ring->fence_drv.gpu_addr;
2554 
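	/* Stall this ring by polling its own fence write-back slot until the
	 * latest emitted sync_seq value appears, i.e. until all previously
	 * submitted work on the ring has signalled.
	 */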
2555 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2556 			      lower_32_bits(addr), upper_32_bits(addr),
2557 			      seq, 0xffffffff, 4);
2558 }
2559 
2560 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2561 					unsigned vmid, uint64_t pd_addr)
2562 {
2563 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2564 }
2565 
2566 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2567 {
2568 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2569 }
2570 
2571 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2572 {
2573 	u64 wptr;
2574 
2575 	/* XXX check if swapping is necessary on BE */
2576 	if (ring->use_doorbell)
2577 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2578 	else
2579 		BUG();
2580 	return wptr;
2581 }
2582 
2583 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2584 {
2585 	struct amdgpu_device *adev = ring->adev;
2586 
2587 	/* XXX check if swapping is necessary on BE */
2588 	if (ring->use_doorbell) {
2589 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2590 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2591 	} else {
2592 		BUG(); /* only DOORBELL method supported on gfx9 now */
2593 	}
2594 }
2595 
2596 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2597 					 u64 seq, unsigned int flags)
2598 {
2599 	struct amdgpu_device *adev = ring->adev;
2600 
2601 	/* we only allocate 32 bits for each seq wb address */
2602 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2603 
2604 	/* write fence seq to the "addr" */
2605 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2606 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2607 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2608 	amdgpu_ring_write(ring, lower_32_bits(addr));
2609 	amdgpu_ring_write(ring, upper_32_bits(addr));
2610 	amdgpu_ring_write(ring, lower_32_bits(seq));
2611 
2612 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2613 		/* set register to trigger INT */
2614 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2615 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2616 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2617 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2618 		amdgpu_ring_write(ring, 0);
2619 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2620 	}
2621 }
2622 
2623 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2624 				    uint32_t reg_val_offs)
2625 {
2626 	struct amdgpu_device *adev = ring->adev;
2627 
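	/* COPY_DATA from a register (src sel 0) to memory (dst sel 5) with
	 * write confirm; the value lands in the write-back buffer at slot
	 * reg_val_offs, where the CPU side can read it back.
	 */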
2628 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2629 	amdgpu_ring_write(ring, 0 |	/* src: register */
2630 				(5 << 8) |	/* dst: memory */
2631 				(1 << 20));	/* write confirm */
2632 	amdgpu_ring_write(ring, reg);
2633 	amdgpu_ring_write(ring, 0);
2634 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2635 				reg_val_offs * 4));
2636 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2637 				reg_val_offs * 4));
2638 }
2639 
2640 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2641 				    uint32_t val)
2642 {
2643 	uint32_t cmd = 0;
2644 
2645 	switch (ring->funcs->type) {
2646 	case AMDGPU_RING_TYPE_GFX:
2647 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
2648 		break;
2649 	case AMDGPU_RING_TYPE_KIQ:
2650 		cmd = (1 << 16); /* no inc addr */
2651 		break;
2652 	default:
2653 		cmd = WR_CONFIRM;
2654 		break;
2655 	}
2656 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2657 	amdgpu_ring_write(ring, cmd);
2658 	amdgpu_ring_write(ring, reg);
2659 	amdgpu_ring_write(ring, 0);
2660 	amdgpu_ring_write(ring, val);
2661 }
2662 
2663 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
2664 					uint32_t val, uint32_t mask)
2665 {
2666 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2667 }
2668 
2669 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
2670 						  uint32_t reg0, uint32_t reg1,
2671 						  uint32_t ref, uint32_t mask)
2672 {
2673 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
2674 						   ref, mask);
2675 }
2676 
2677 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2678 	struct amdgpu_device *adev, int me, int pipe,
2679 	enum amdgpu_interrupt_state state, int xcc_id)
2680 {
2681 	u32 mec_int_cntl, mec_int_cntl_reg;
2682 
2683 	/*
2684 	 * amdgpu controls only the first MEC. That's why this function only
2685 	 * handles the setting of interrupts for this specific MEC. All other
2686 	 * pipes' interrupts are set by amdkfd.
2687 	 */
2688 
2689 	if (me == 1) {
2690 		switch (pipe) {
2691 		case 0:
2692 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
2693 			break;
2694 		case 1:
2695 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
2696 			break;
2697 		case 2:
2698 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
2699 			break;
2700 		case 3:
2701 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
2702 			break;
2703 		default:
2704 			DRM_DEBUG("invalid pipe %d\n", pipe);
2705 			return;
2706 		}
2707 	} else {
2708 		DRM_DEBUG("invalid me %d\n", me);
2709 		return;
2710 	}
2711 
2712 	switch (state) {
2713 	case AMDGPU_IRQ_STATE_DISABLE:
2714 		mec_int_cntl = RREG32(mec_int_cntl_reg);
2715 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2716 					     TIME_STAMP_INT_ENABLE, 0);
2717 		WREG32(mec_int_cntl_reg, mec_int_cntl);
2718 		break;
2719 	case AMDGPU_IRQ_STATE_ENABLE:
2720 		mec_int_cntl = RREG32(mec_int_cntl_reg);
2721 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2722 					     TIME_STAMP_INT_ENABLE, 1);
2723 		WREG32(mec_int_cntl_reg, mec_int_cntl);
2724 		break;
2725 	default:
2726 		break;
2727 	}
2728 }
2729 
2730 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
2731 					     struct amdgpu_irq_src *source,
2732 					     unsigned type,
2733 					     enum amdgpu_interrupt_state state)
2734 {
2735 	int i, num_xcc;
2736 
2737 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2738 	switch (state) {
2739 	case AMDGPU_IRQ_STATE_DISABLE:
2740 	case AMDGPU_IRQ_STATE_ENABLE:
2741 		for (i = 0; i < num_xcc; i++)
2742 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2743 				PRIV_REG_INT_ENABLE,
2744 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2745 		break;
2746 	default:
2747 		break;
2748 	}
2749 
2750 	return 0;
2751 }
2752 
2753 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
2754 					      struct amdgpu_irq_src *source,
2755 					      unsigned type,
2756 					      enum amdgpu_interrupt_state state)
2757 {
2758 	int i, num_xcc;
2759 
2760 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2761 	switch (state) {
2762 	case AMDGPU_IRQ_STATE_DISABLE:
2763 	case AMDGPU_IRQ_STATE_ENABLE:
2764 		for (i = 0; i < num_xcc; i++)
2765 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2766 				PRIV_INSTR_INT_ENABLE,
2767 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2768 		break;
2769 	default:
2770 		break;
2771 	}
2772 
2773 	return 0;
2774 }
2775 
2776 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
2777 					    struct amdgpu_irq_src *src,
2778 					    unsigned type,
2779 					    enum amdgpu_interrupt_state state)
2780 {
2781 	int i, num_xcc;
2782 
2783 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2784 	for (i = 0; i < num_xcc; i++) {
2785 		switch (type) {
2786 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
2787 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2788 				adev, 1, 0, state, i);
2789 			break;
2790 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
2791 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2792 				adev, 1, 1, state, i);
2793 			break;
2794 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
2795 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2796 				adev, 1, 2, state, i);
2797 			break;
2798 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
2799 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2800 				adev, 1, 3, state, i);
2801 			break;
2802 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
2803 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2804 				adev, 2, 0, state, i);
2805 			break;
2806 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
2807 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2808 				adev, 2, 1, state, i);
2809 			break;
2810 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
2811 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2812 				adev, 2, 2, state, i);
2813 			break;
2814 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
2815 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2816 				adev, 2, 3, state, i);
2817 			break;
2818 		default:
2819 			break;
2820 		}
2821 	}
2822 
2823 	return 0;
2824 }
2825 
2826 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
2827 			    struct amdgpu_irq_src *source,
2828 			    struct amdgpu_iv_entry *entry)
2829 {
2830 	int i, xcc_id;
2831 	u8 me_id, pipe_id, queue_id;
2832 	struct amdgpu_ring *ring;
2833 
2834 	DRM_DEBUG("IH: CP EOP\n");
2835 	me_id = (entry->ring_id & 0x0c) >> 2;
2836 	pipe_id = (entry->ring_id & 0x03) >> 0;
2837 	queue_id = (entry->ring_id & 0x70) >> 4;
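	/* ring_id packs the pipe in bits [1:0], the ME in bits [3:2] and the
	 * queue in bits [6:4] of the IV entry, per the masks above.
	 */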
2838 
2839 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2840 
2841 	if (xcc_id == -EINVAL)
2842 		return -EINVAL;
2843 
2844 	switch (me_id) {
2845 	case 0:
2846 	case 1:
2847 	case 2:
2848 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2849 			ring = &adev->gfx.compute_ring
2850 					[i +
2851 					 xcc_id * adev->gfx.num_compute_rings];
2852 			/* Per-queue interrupt is supported for MEC starting from VI.
2853 			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
2854 			 */
2856 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
2857 				amdgpu_fence_process(ring);
2858 		}
2859 		break;
2860 	}
2861 	return 0;
2862 }
2863 
2864 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
2865 			   struct amdgpu_iv_entry *entry)
2866 {
2867 	u8 me_id, pipe_id, queue_id;
2868 	struct amdgpu_ring *ring;
2869 	int i, xcc_id;
2870 
2871 	me_id = (entry->ring_id & 0x0c) >> 2;
2872 	pipe_id = (entry->ring_id & 0x03) >> 0;
2873 	queue_id = (entry->ring_id & 0x70) >> 4;
2874 
2875 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2876 
2877 	if (xcc_id == -EINVAL)
2878 		return;
2879 
2880 	switch (me_id) {
2881 	case 0:
2882 	case 1:
2883 	case 2:
2884 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2885 			ring = &adev->gfx.compute_ring
2886 					[i +
2887 					 xcc_id * adev->gfx.num_compute_rings];
2888 			if (ring->me == me_id && ring->pipe == pipe_id &&
2889 			    ring->queue == queue_id)
2890 				drm_sched_fault(&ring->sched);
2891 		}
2892 		break;
2893 	}
2894 }
2895 
2896 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
2897 				 struct amdgpu_irq_src *source,
2898 				 struct amdgpu_iv_entry *entry)
2899 {
2900 	DRM_ERROR("Illegal register access in command stream\n");
2901 	gfx_v9_4_3_fault(adev, entry);
2902 	return 0;
2903 }
2904 
2905 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
2906 				  struct amdgpu_irq_src *source,
2907 				  struct amdgpu_iv_entry *entry)
2908 {
2909 	DRM_ERROR("Illegal instruction in command stream\n");
2910 	gfx_v9_4_3_fault(adev, entry);
2911 	return 0;
2912 }
2913 
2914 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
2915 {
2916 	const unsigned int cp_coher_cntl =
2917 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
2918 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
2919 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
2920 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
2921 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
2922 
2923 	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
2924 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
2925 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
2926 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
2927 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
2928 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
2929 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
2930 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
2931 }
2932 
2933 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
2934 					uint32_t pipe, bool enable)
2935 {
2936 	struct amdgpu_device *adev = ring->adev;
2937 	uint32_t val;
2938 	uint32_t wcl_cs_reg;
2939 
2940 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
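	/* 0x1 throttles the pipe's compute waves to the minimum share, while
	 * 0x7f presumably restores the unconstrained default percentage.
	 */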
2941 	val = enable ? 0x1 : 0x7f;
2942 
2943 	switch (pipe) {
2944 	case 0:
2945 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
2946 		break;
2947 	case 1:
2948 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
2949 		break;
2950 	case 2:
2951 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
2952 		break;
2953 	case 3:
2954 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
2955 		break;
2956 	default:
2957 		DRM_DEBUG("invalid pipe %d\n", pipe);
2958 		return;
2959 	}
2960 
2961 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
2963 }

2964 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
2965 {
2966 	struct amdgpu_device *adev = ring->adev;
2967 	uint32_t val;
2968 	int i;
2969 
2970 	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register that
2971 	 * limits the number of gfx waves. Setting 5 bits makes sure gfx
2972 	 * only gets around 25% of the gpu resources.
2973 	 */
2974 	val = enable ? 0x1f : 0x07ffffff;
2975 	amdgpu_ring_emit_wreg(ring,
2976 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
2977 			      val);
2978 
2979 	/* Restrict waves for normal/low priority compute queues as well
2980 	 * to get best QoS for high priority compute jobs.
2981 	 *
2982 	 * amdgpu controls only 1st ME(0-3 CS pipes).
2983 	 */
2984 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2985 		if (i != ring->pipe)
2986 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
2988 	}
2989 }
2990 
2991 enum amdgpu_gfx_cp_ras_mem_id {
2992 	AMDGPU_GFX_CP_MEM1 = 1,
2993 	AMDGPU_GFX_CP_MEM2,
2994 	AMDGPU_GFX_CP_MEM3,
2995 	AMDGPU_GFX_CP_MEM4,
2996 	AMDGPU_GFX_CP_MEM5,
2997 };
2998 
2999 enum amdgpu_gfx_gcea_ras_mem_id {
3000 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3001 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3002 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3003 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3004 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3005 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3006 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3007 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3008 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3009 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3010 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3011 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3012 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3013 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3014 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3015 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3016 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3017 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3018 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3019 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3020 };
3021 
3022 enum amdgpu_gfx_gc_cane_ras_mem_id {
3023 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3024 };
3025 
3026 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3027 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3028 };
3029 
3030 enum amdgpu_gfx_gds_ras_mem_id {
3031 	AMDGPU_GFX_GDS_MEM0 = 0,
3032 };
3033 
3034 enum amdgpu_gfx_lds_ras_mem_id {
3035 	AMDGPU_GFX_LDS_BANK0 = 0,
3036 	AMDGPU_GFX_LDS_BANK1,
3037 	AMDGPU_GFX_LDS_BANK2,
3038 	AMDGPU_GFX_LDS_BANK3,
3039 	AMDGPU_GFX_LDS_BANK4,
3040 	AMDGPU_GFX_LDS_BANK5,
3041 	AMDGPU_GFX_LDS_BANK6,
3042 	AMDGPU_GFX_LDS_BANK7,
3043 	AMDGPU_GFX_LDS_BANK8,
3044 	AMDGPU_GFX_LDS_BANK9,
3045 	AMDGPU_GFX_LDS_BANK10,
3046 	AMDGPU_GFX_LDS_BANK11,
3047 	AMDGPU_GFX_LDS_BANK12,
3048 	AMDGPU_GFX_LDS_BANK13,
3049 	AMDGPU_GFX_LDS_BANK14,
3050 	AMDGPU_GFX_LDS_BANK15,
3051 	AMDGPU_GFX_LDS_BANK16,
3052 	AMDGPU_GFX_LDS_BANK17,
3053 	AMDGPU_GFX_LDS_BANK18,
3054 	AMDGPU_GFX_LDS_BANK19,
3055 	AMDGPU_GFX_LDS_BANK20,
3056 	AMDGPU_GFX_LDS_BANK21,
3057 	AMDGPU_GFX_LDS_BANK22,
3058 	AMDGPU_GFX_LDS_BANK23,
3059 	AMDGPU_GFX_LDS_BANK24,
3060 	AMDGPU_GFX_LDS_BANK25,
3061 	AMDGPU_GFX_LDS_BANK26,
3062 	AMDGPU_GFX_LDS_BANK27,
3063 	AMDGPU_GFX_LDS_BANK28,
3064 	AMDGPU_GFX_LDS_BANK29,
3065 	AMDGPU_GFX_LDS_BANK30,
3066 	AMDGPU_GFX_LDS_BANK31,
3067 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3068 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3069 };
3070 
3071 enum amdgpu_gfx_rlc_ras_mem_id {
3072 	AMDGPU_GFX_RLC_GPMF32 = 1,
3073 	AMDGPU_GFX_RLC_RLCVF32,
3074 	AMDGPU_GFX_RLC_SCRATCH,
3075 	AMDGPU_GFX_RLC_SRM_ARAM,
3076 	AMDGPU_GFX_RLC_SRM_DRAM,
3077 	AMDGPU_GFX_RLC_TCTAG,
3078 	AMDGPU_GFX_RLC_SPM_SE,
3079 	AMDGPU_GFX_RLC_SPM_GRBMT,
3080 };
3081 
3082 enum amdgpu_gfx_sp_ras_mem_id {
3083 	AMDGPU_GFX_SP_SIMDID0 = 0,
3084 };
3085 
3086 enum amdgpu_gfx_spi_ras_mem_id {
3087 	AMDGPU_GFX_SPI_MEM0 = 0,
3088 	AMDGPU_GFX_SPI_MEM1,
3089 	AMDGPU_GFX_SPI_MEM2,
3090 	AMDGPU_GFX_SPI_MEM3,
3091 };
3092 
3093 enum amdgpu_gfx_sqc_ras_mem_id {
3094 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3095 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3096 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3097 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3098 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3099 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3100 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3101 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3102 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3103 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3104 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3105 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3106 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3107 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3108 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3109 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3110 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3111 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3112 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3113 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3114 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3115 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3116 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3117 };
3118 
3119 enum amdgpu_gfx_sq_ras_mem_id {
3120 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3121 	AMDGPU_GFX_SQ_SGPR_MEM1,
3122 	AMDGPU_GFX_SQ_SGPR_MEM2,
3123 	AMDGPU_GFX_SQ_SGPR_MEM3,
3124 };
3125 
3126 enum amdgpu_gfx_ta_ras_mem_id {
3127 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3128 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3129 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3130 	AMDGPU_GFX_TA_FSX_LFIFO,
3131 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3132 };
3133 
3134 enum amdgpu_gfx_tcc_ras_mem_id {
3135 	AMDGPU_GFX_TCC_MEM1 = 1,
3136 };
3137 
3138 enum amdgpu_gfx_tca_ras_mem_id {
3139 	AMDGPU_GFX_TCA_MEM1 = 1,
3140 };
3141 
3142 enum amdgpu_gfx_tci_ras_mem_id {
3143 	AMDGPU_GFX_TCIW_MEM = 1,
3144 };
3145 
3146 enum amdgpu_gfx_tcp_ras_mem_id {
3147 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3148 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3149 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3150 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3151 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3152 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3153 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3154 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3155 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3156 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3157 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3158 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3159 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3160 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3161 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3162 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3163 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3164 	AMDGPU_GFX_TCP_VM_FIFO,
3165 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3166 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3167 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3168 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3169 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3170 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3171 	AMDGPU_GFX_TCP_CMD_FIFO,
3172 };
3173 
3174 enum amdgpu_gfx_td_ras_mem_id {
3175 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3176 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3177 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3178 };
3179 
3180 enum amdgpu_gfx_tcx_ras_mem_id {
3181 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3182 	AMDGPU_GFX_TCX_FIFOD1,
3183 	AMDGPU_GFX_TCX_FIFOD2,
3184 	AMDGPU_GFX_TCX_FIFOD3,
3185 	AMDGPU_GFX_TCX_FIFOD4,
3186 	AMDGPU_GFX_TCX_FIFOD5,
3187 	AMDGPU_GFX_TCX_FIFOD6,
3188 	AMDGPU_GFX_TCX_FIFOD7,
3189 	AMDGPU_GFX_TCX_FIFOB0,
3190 	AMDGPU_GFX_TCX_FIFOB1,
3191 	AMDGPU_GFX_TCX_FIFOB2,
3192 	AMDGPU_GFX_TCX_FIFOB3,
3193 	AMDGPU_GFX_TCX_FIFOB4,
3194 	AMDGPU_GFX_TCX_FIFOB5,
3195 	AMDGPU_GFX_TCX_FIFOB6,
3196 	AMDGPU_GFX_TCX_FIFOB7,
3197 	AMDGPU_GFX_TCX_FIFOA0,
3198 	AMDGPU_GFX_TCX_FIFOA1,
3199 	AMDGPU_GFX_TCX_FIFOA2,
3200 	AMDGPU_GFX_TCX_FIFOA3,
3201 	AMDGPU_GFX_TCX_FIFOA4,
3202 	AMDGPU_GFX_TCX_FIFOA5,
3203 	AMDGPU_GFX_TCX_FIFOA6,
3204 	AMDGPU_GFX_TCX_FIFOA7,
3205 	AMDGPU_GFX_TCX_CFIFO0,
3206 	AMDGPU_GFX_TCX_CFIFO1,
3207 	AMDGPU_GFX_TCX_CFIFO2,
3208 	AMDGPU_GFX_TCX_CFIFO3,
3209 	AMDGPU_GFX_TCX_CFIFO4,
3210 	AMDGPU_GFX_TCX_CFIFO5,
3211 	AMDGPU_GFX_TCX_CFIFO6,
3212 	AMDGPU_GFX_TCX_CFIFO7,
3213 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3214 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3215 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3216 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3217 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3218 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3219 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3220 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3221 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3222 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3223 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3224 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3225 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3226 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3227 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3228 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3229 	AMDGPU_GFX_TCX_DST_FIFOA0,
3230 	AMDGPU_GFX_TCX_DST_FIFOA1,
3231 	AMDGPU_GFX_TCX_DST_FIFOA2,
3232 	AMDGPU_GFX_TCX_DST_FIFOA3,
3233 	AMDGPU_GFX_TCX_DST_FIFOA4,
3234 	AMDGPU_GFX_TCX_DST_FIFOA5,
3235 	AMDGPU_GFX_TCX_DST_FIFOA6,
3236 	AMDGPU_GFX_TCX_DST_FIFOA7,
3237 	AMDGPU_GFX_TCX_DST_FIFOB0,
3238 	AMDGPU_GFX_TCX_DST_FIFOB1,
3239 	AMDGPU_GFX_TCX_DST_FIFOB2,
3240 	AMDGPU_GFX_TCX_DST_FIFOB3,
3241 	AMDGPU_GFX_TCX_DST_FIFOB4,
3242 	AMDGPU_GFX_TCX_DST_FIFOB5,
3243 	AMDGPU_GFX_TCX_DST_FIFOB6,
3244 	AMDGPU_GFX_TCX_DST_FIFOB7,
3245 	AMDGPU_GFX_TCX_DST_FIFOD0,
3246 	AMDGPU_GFX_TCX_DST_FIFOD1,
3247 	AMDGPU_GFX_TCX_DST_FIFOD2,
3248 	AMDGPU_GFX_TCX_DST_FIFOD3,
3249 	AMDGPU_GFX_TCX_DST_FIFOD4,
3250 	AMDGPU_GFX_TCX_DST_FIFOD5,
3251 	AMDGPU_GFX_TCX_DST_FIFOD6,
3252 	AMDGPU_GFX_TCX_DST_FIFOD7,
3253 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3254 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3255 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3256 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3257 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3258 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3259 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3260 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3261 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3262 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3263 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3264 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3265 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3266 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3267 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3268 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3269 };
3270 
3271 enum amdgpu_gfx_atc_l2_ras_mem_id {
3272 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3273 };
3274 
3275 enum amdgpu_gfx_utcl2_ras_mem_id {
3276 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3277 };
3278 
3279 enum amdgpu_gfx_vml2_ras_mem_id {
3280 	AMDGPU_GFX_VML2_MEM0 = 0,
3281 };
3282 
3283 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3284 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3285 };
3286 
3287 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3288 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3289 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3290 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3291 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3292 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3293 };
3294 
3295 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3296 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3297 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3298 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3299 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3300 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3301 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3302 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3303 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3304 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3305 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3306 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3307 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3308 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3309 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3310 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3311 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3312 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3313 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3314 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3315 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3316 };
3317 
3318 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3319 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3320 };
3321 
3322 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3323 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3324 };
3325 
3326 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3327 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3328 };
3329 
3330 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3331 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3332 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3333 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3334 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3335 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3336 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3337 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3338 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3339 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3340 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3341 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3342 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3343 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3344 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3345 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3346 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3347 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
3348 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
3349 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
3350 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
3351 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
3352 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
3353 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
3354 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
3355 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
3356 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
3357 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
3358 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
3359 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
3360 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
3361 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
3362 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
3363 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
3364 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
3365 };
3366 
3367 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
3368 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
3369 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
3370 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
3371 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
3372 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
3373 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
3374 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
3375 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
3376 };
3377 
3378 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
3379 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
3380 };
3381 
3382 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
3383 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
3384 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
3385 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
3386 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
3387 };
3388 
3389 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
3390 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
3391 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
3392 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
3393 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
3394 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
3395 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
3396 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
3397 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
3398 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
3399 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
3400 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
3401 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
3402 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
3403 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
3404 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
3405 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
3406 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
3407 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
3408 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
3409 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
3410 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
3411 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
3412 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
3413 };
3414 
3415 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
3416 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
3417 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
3418 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
3419 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
3420 };
3421 
3422 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
3423 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
3424 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
3425 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
3426 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
3427 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
3428 };
3429 
3430 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
3431 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
3432 };
3433 
3434 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
3435 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
3436 };
3437 
3438 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
3439 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
3440 };
3441 
3442 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
3443 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
3444 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
3445 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
3446 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
3447 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
3448 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
3449 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
3450 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
3451 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
3452 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
3453 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
3454 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
3455 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
3456 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
3457 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
3458 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
3459 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
3460 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
3461 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
3462 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
3463 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
3464 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
3465 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
3466 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
3467 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
3468 };
3469 
3470 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
3471 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
3472 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
3473 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
3474 };
3475 
3476 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
3477 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
3478 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
3479 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
3480 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
3481 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
3482 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
3483 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
3484 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
3485 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
3486 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
3487 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
3488 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
3489 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
3490 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
3491 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
3492 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
3493 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
3494 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
3495 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
3496 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
3497 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
3498 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
3499 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
3500 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
3501 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
3502 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
3503 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
3504 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
3505 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
3506 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
3507 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
3508 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
3509 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
3510 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
3511 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
3512 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
3513 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
3514 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
3515 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
3516 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
3517 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
3518 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
3519 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
3520 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
3521 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
3522 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
3523 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
3524 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
3525 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
3526 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
3527 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
3528 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
3529 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
3530 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
3531 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
3532 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
3533 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
3534 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
3535 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
3536 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
3537 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
3538 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
3539 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
3540 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
3541 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
3542 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
3543 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
3544 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
3545 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
3546 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
3547 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
3548 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
3549 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
3550 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
3551 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
3552 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
3553 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
3554 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
3555 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
3556 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
3557 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
3558 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
3559 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
3560 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
3561 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
3562 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
3563 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
3564 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
3565 };
3566 
3567 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
3568 	{AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
3569 };
3570 
3571 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
3572 	{AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
3573 };
3574 
3575 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
3576 	{AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
3577 };
3578 
3579 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
3580 	{AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
3581 };
3582 
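/*
 * Master lookup table indexed by AMDGPU_GFX_MEM_TYPE_*: each entry points at
 * one of the memory ID name tables above together with its size, so the
 * ordering here is assumed to match the memory type enum.
 */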
3583 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
3584 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
3585 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
3586 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
3587 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
3588 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
3589 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
3590 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
3591 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
3592 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
3593 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
3594 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
3595 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
3596 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
3597 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
3598 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
3599 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
3600 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
3601 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
3602 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
3603 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
3604 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
3605 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
3606 };
3607 
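/*
 * Correctable-error (CE) status register list. Each entry carries a LO/HI
 * status register pair, the register instance count walked per SE, the
 * valid-field flags, a block tag used in log messages, the memory ID table
 * for decoding the failing memory, and the number of SEs to iterate.
 */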
3608 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
3609 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
3610 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
3611 	    AMDGPU_GFX_RLC_MEM, 1},
3612 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
3613 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
3614 	    AMDGPU_GFX_CP_MEM, 1},
3615 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
3616 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
3617 	    AMDGPU_GFX_CP_MEM, 1},
3618 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
3619 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
3620 	    AMDGPU_GFX_CP_MEM, 1},
3621 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
3622 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
3623 	    AMDGPU_GFX_GDS_MEM, 1},
3624 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
3625 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
3626 	    AMDGPU_GFX_GC_CANE_MEM, 1},
3627 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
3628 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
3629 	    AMDGPU_GFX_SPI_MEM, 8},
3630 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
3631 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
3632 	    AMDGPU_GFX_SP_MEM, 1},
3633 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
3634 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
3635 	    AMDGPU_GFX_SP_MEM, 1},
3636 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
3637 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
3638 	    AMDGPU_GFX_SQ_MEM, 8},
3639 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
3640 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
3641 	    AMDGPU_GFX_SQC_MEM, 8},
3642 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
3643 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
3644 	    AMDGPU_GFX_TCX_MEM, 1},
3645 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
3646 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
3647 	    AMDGPU_GFX_TCC_MEM, 1},
3648 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
3649 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
3650 	    AMDGPU_GFX_TA_MEM, 8},
3651 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
3652 	    31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3653 	    AMDGPU_GFX_TCI_MEM, 1},
3654 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
3655 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
3656 	    AMDGPU_GFX_TCP_MEM, 8},
3657 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
3658 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
3659 	    AMDGPU_GFX_TD_MEM, 8},
3660 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
3661 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
3662 	    AMDGPU_GFX_GCEA_MEM, 1},
3663 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
3664 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
3665 	    AMDGPU_GFX_LDS_MEM, 1},
3666 };
3667 
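/*
 * Uncorrectable-error (UE) counterpart of the CE list above. It carries one
 * extra entry (TCA) that has no CE counterpart; the query/reset helpers
 * below iterate using the CE list size and index both lists in lockstep.
 */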
3668 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
3669 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
3670 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
3671 	    AMDGPU_GFX_RLC_MEM, 1},
3672 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
3673 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
3674 	    AMDGPU_GFX_CP_MEM, 1},
3675 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
3676 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
3677 	    AMDGPU_GFX_CP_MEM, 1},
3678 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
3679 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
3680 	    AMDGPU_GFX_CP_MEM, 1},
3681 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
3682 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
3683 	    AMDGPU_GFX_GDS_MEM, 1},
3684 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
3685 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
3686 	    AMDGPU_GFX_GC_CANE_MEM, 1},
3687 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
3688 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
3689 	    AMDGPU_GFX_SPI_MEM, 8},
3690 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
3691 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
3692 	    AMDGPU_GFX_SP_MEM, 1},
3693 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
3694 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
3695 	    AMDGPU_GFX_SP_MEM, 1},
3696 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
3697 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
3698 	    AMDGPU_GFX_SQ_MEM, 8},
3699 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
3700 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
3701 	    AMDGPU_GFX_SQC_MEM, 8},
3702 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
3703 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
3704 	    AMDGPU_GFX_TCX_MEM, 1},
3705 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
3706 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
3707 	    AMDGPU_GFX_TCC_MEM, 1},
3708 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
3709 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
3710 	    AMDGPU_GFX_TA_MEM, 8},
3711 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
3712 	    31, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
3713 	    AMDGPU_GFX_TCI_MEM, 1},
3714 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
3715 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
3716 	    AMDGPU_GFX_TCP_MEM, 8},
3717 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
3718 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
3719 	    AMDGPU_GFX_TD_MEM, 8},
3720 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
3721 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
3722 	    AMDGPU_GFX_TCA_MEM, 1},
3723 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
3724 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
3725 	    AMDGPU_GFX_GCEA_MEM, 1},
3726 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
3727 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
3728 	    AMDGPU_GFX_LDS_MEM, 1},
3729 };
3730 
3731 static const struct soc15_reg_entry gfx_v9_4_3_ea_err_status_regs = {
3732 	SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
3733 };
3734 
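/*
 * Query CE/UE error counts for a single XCC: walk every register entry,
 * selecting the target SE/instance through GRBM where needed, and
 * accumulate the totals into the caller-provided ras_error_status.
 */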
3735 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
3736 					void *ras_error_status, int xcc_id)
3737 {
3738 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
3739 	unsigned long ce_count = 0, ue_count = 0;
3740 	uint32_t i, j, k;
3741 
3742 	mutex_lock(&adev->grbm_idx_mutex);
3743 
3744 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
3745 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
3746 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
3747 				/* no need to select if instance number is 1 */
3748 				/* selection only needed with multiple SEs or reg instances */
3749 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
3750 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
3751 
3752 				amdgpu_ras_inst_query_ras_error_count(adev,
3753 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
3754 					1,
3755 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
3756 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
3757 					GET_INST(GC, xcc_id),
3758 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
3759 					&ce_count);
3760 
3761 				amdgpu_ras_inst_query_ras_error_count(adev,
3762 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
3763 					1,
3764 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
3765 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
3766 					GET_INST(GC, xcc_id),
3767 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
3768 					&ue_count);
3769 			}
3770 		}
3771 	}
3772 
3773 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3774 			xcc_id);
3775 	mutex_unlock(&adev->grbm_idx_mutex);
3776 
3777 	/* the caller is expected to have initialized
3778 	 * err_data->ue_count and err_data->ce_count
3779 	 */
3780 	err_data->ce_count += ce_count;
3781 	err_data->ue_count += ue_count;
3782 }
3783 
3784 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
3785 					void *ras_error_status, int xcc_id)
3786 {
3787 	uint32_t i, j, k;
3788 
3789 	mutex_lock(&adev->grbm_idx_mutex);
3790 
3791 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
3792 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
3793 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
3794 				/* no need to select if instance number is 1 */
3795 				/* selection only needed with multiple SEs or reg instances */
3796 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
3797 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
3798 
3799 				amdgpu_ras_inst_reset_ras_error_count(adev,
3800 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
3801 					1,
3802 					GET_INST(GC, xcc_id));
3803 
3804 				amdgpu_ras_inst_reset_ras_error_count(adev,
3805 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
3806 					1,
3807 					GET_INST(GC, xcc_id));
3808 			}
3809 		}
3810 	}
3811 
3812 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3813 			xcc_id);
3814 	mutex_unlock(&adev->grbm_idx_mutex);
3815 }
3816 
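/*
 * Check each GCEA instance for SDP read/write response errors or read data
 * parity errors, warn if any are flagged, and clear the status by setting
 * the CLEAR_ERROR_STATUS bit after reading.
 */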
3817 static void gfx_v9_4_3_inst_query_ea_err_status(struct amdgpu_device *adev,
3818 					int xcc_id)
3819 {
3820 	uint32_t i, j;
3821 	uint32_t reg_value;
3822 
3823 	mutex_lock(&adev->grbm_idx_mutex);
3824 
3825 	for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
3826 		for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
3827 			gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
3828 			reg_value = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3829 					regGCEA_ERR_STATUS);
3830 			if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
3831 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
3832 			    REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
3833 				dev_warn(adev->dev,
3834 					"GCEA err detected at instance: %d, status: 0x%x!\n",
3835 					j, reg_value);
3836 			}
3837 			/* clear after read */
3838 			reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
3839 						  CLEAR_ERROR_STATUS, 0x1);
3840 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS,
3841 					reg_value);
3842 		}
3843 	}
3844 
3845 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3846 			xcc_id);
3847 	mutex_unlock(&adev->grbm_idx_mutex);
3848 }
3849 
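/*
 * Report the ECC status of the UTCL2, VML2 and VML2 walker memories;
 * 0x3 is written back to clear the latched status bits.
 */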
3850 static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
3851 					int xcc_id)
3852 {
3853 	uint32_t data;
3854 
3855 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS);
3856 	if (data) {
3857 		dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
3858 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
3859 	}
3860 
3861 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS);
3862 	if (data) {
3863 		dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
3864 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
3865 	}
3866 
3867 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3868 				regVML2_WALKER_MEM_ECC_STATUS);
3869 	if (data) {
3870 		dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
3871 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS,
3872 				0x3);
3873 	}
3874 }
3875 
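/*
 * For every wave flagged in the SQ watchdog status bitmap, dump its state:
 * STATUS, PC, EXEC mask, current instruction words and IB state.
 */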
3876 static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev,
3877 					uint32_t status, int xcc_id)
3878 {
3879 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
3880 	uint32_t i, simd, wave;
3881 	uint32_t wave_status;
3882 	uint32_t wave_pc_lo, wave_pc_hi;
3883 	uint32_t wave_exec_lo, wave_exec_hi;
3884 	uint32_t wave_inst_dw0, wave_inst_dw1;
3885 	uint32_t wave_ib_sts;
3886 
3887 	for (i = 0; i < 32; i++) {
3888 		if (!((1 << i) & status))	/* one status bit per wave */
3889 			continue;
3890 
3891 		simd = i / cu_info->max_waves_per_simd;
3892 		wave = i % cu_info->max_waves_per_simd;
3893 
3894 		wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
3895 		wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
3896 		wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
3897 		wave_exec_lo =
3898 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
3899 		wave_exec_hi =
3900 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
3901 		wave_inst_dw0 =
3902 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
3903 		wave_inst_dw1 =
3904 			wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
3905 		wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
3906 
3907 		dev_info(
3908 			adev->dev,
3909 			"\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
3910 			simd, wave, wave_status,
3911 			((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
3912 			((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
3913 			((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
3914 			wave_ib_sts);
3915 	}
3916 }
3917 
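/*
 * Scan SQ_TIMEOUT_STATUS across all SE/SH/CU of the XCC; on a watchdog hit,
 * log the location and the hung wave state, then clear the status.
 */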
3918 static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
3919 					int xcc_id)
3920 {
3921 	uint32_t se_idx, sh_idx, cu_idx;
3922 	uint32_t status;
3923 
3924 	mutex_lock(&adev->grbm_idx_mutex);
3925 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
3926 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
3927 			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
3928 				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
3929 							cu_idx, xcc_id);
3930 				status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
3931 						      regSQ_TIMEOUT_STATUS);
3932 				if (status != 0) {
3933 					dev_info(
3934 						adev->dev,
3935 						"GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
3936 						se_idx, sh_idx, cu_idx);
3937 					gfx_v9_4_3_log_cu_timeout_status(
3938 						adev, status, xcc_id);
3939 				}
3940 				/* clear old status */
3941 				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
3942 						regSQ_TIMEOUT_STATUS, 0);
3943 			}
3944 		}
3945 	}
3946 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3947 			xcc_id);
3948 	mutex_unlock(&adev->grbm_idx_mutex);
3949 }
3950 
3951 static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
3952 					void *ras_error_status, int xcc_id)
3953 {
3954 	gfx_v9_4_3_inst_query_ea_err_status(adev, xcc_id);
3955 	gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
3956 	gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
3957 }
3958 
3959 static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
3960 					int xcc_id)
3961 {
3962 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
3963 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
3964 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
3965 }
3966 
3967 static void gfx_v9_4_3_inst_reset_ea_err_status(struct amdgpu_device *adev,
3968 					int xcc_id)
3969 {
3970 	uint32_t i, j;
3971 	uint32_t value;
3972 
3973 	mutex_lock(&adev->grbm_idx_mutex);
3974 	for (i = 0; i < gfx_v9_4_3_ea_err_status_regs.se_num; i++) {
3975 		for (j = 0; j < gfx_v9_4_3_ea_err_status_regs.instance; j++) {
3976 			gfx_v9_4_3_xcc_select_se_sh(adev, i, 0, j, xcc_id);
3977 			value = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS);
3978 			value = REG_SET_FIELD(value, GCEA_ERR_STATUS,
3979 						CLEAR_ERROR_STATUS, 0x1);
3980 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGCEA_ERR_STATUS, value);
3981 		}
3982 	}
3983 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
3984 			xcc_id);
3985 	mutex_unlock(&adev->grbm_idx_mutex);
3986 }
3987 
3988 static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
3989 					int xcc_id)
3990 {
3991 	uint32_t se_idx, sh_idx, cu_idx;
3992 
3993 	mutex_lock(&adev->grbm_idx_mutex);
3994 	for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
3995 		for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
3996 			for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
3997 				gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
3998 							cu_idx, xcc_id);
3999 				WREG32_SOC15(GC, GET_INST(GC, xcc_id),
4000 						regSQ_TIMEOUT_STATUS, 0);
4001 			}
4002 		}
4003 	}
4004 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4005 			xcc_id);
4006 	mutex_unlock(&adev->grbm_idx_mutex);
4007 }
4008 
4009 static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
4010 					void *ras_error_status, int xcc_id)
4011 {
4012 	gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
4013 	gfx_v9_4_3_inst_reset_ea_err_status(adev, xcc_id);
4014 	gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
4015 }
4016 
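/*
 * Device-level RAS hooks: amdgpu_gfx_ras_error_func() fans each of the
 * per-instance helpers above out across the XCC instances.
 */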
4017 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4018 					void *ras_error_status)
4019 {
4020 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4021 			gfx_v9_4_3_inst_query_ras_err_count);
4022 }
4023 
4024 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4025 {
4026 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4027 }
4028 
4029 static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev)
4030 {
4031 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status);
4032 }
4033 
4034 static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
4035 {
4036 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
4037 }
4038 
4039 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4040 	.name = "gfx_v9_4_3",
4041 	.early_init = gfx_v9_4_3_early_init,
4042 	.late_init = gfx_v9_4_3_late_init,
4043 	.sw_init = gfx_v9_4_3_sw_init,
4044 	.sw_fini = gfx_v9_4_3_sw_fini,
4045 	.hw_init = gfx_v9_4_3_hw_init,
4046 	.hw_fini = gfx_v9_4_3_hw_fini,
4047 	.suspend = gfx_v9_4_3_suspend,
4048 	.resume = gfx_v9_4_3_resume,
4049 	.is_idle = gfx_v9_4_3_is_idle,
4050 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4051 	.soft_reset = gfx_v9_4_3_soft_reset,
4052 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4053 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4054 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4055 };
4056 
4057 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4058 	.type = AMDGPU_RING_TYPE_COMPUTE,
4059 	.align_mask = 0xff,
4060 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4061 	.support_64bit_ptrs = true,
4062 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4063 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4064 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4065 	.emit_frame_size =
4066 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4067 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4068 		5 + /* hdp invalidate */
4069 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4070 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4071 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4072 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4073 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4074 		7 + /* gfx_v9_4_3_emit_mem_sync */
4075 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4076 		15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4077 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4078 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4079 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4080 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4081 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4082 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4083 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4084 	.test_ring = gfx_v9_4_3_ring_test_ring,
4085 	.test_ib = gfx_v9_4_3_ring_test_ib,
4086 	.insert_nop = amdgpu_ring_insert_nop,
4087 	.pad_ib = amdgpu_ring_generic_pad_ib,
4088 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4089 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4090 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4091 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4092 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4093 };
4094 
4095 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4096 	.type = AMDGPU_RING_TYPE_KIQ,
4097 	.align_mask = 0xff,
4098 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4099 	.support_64bit_ptrs = true,
4100 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4101 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4102 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4103 	.emit_frame_size =
4104 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4105 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4106 		5 + /* hdp invalidate */
4107 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4108 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4109 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4110 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4111 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4112 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4113 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4114 	.test_ring = gfx_v9_4_3_ring_test_ring,
4115 	.insert_nop = amdgpu_ring_insert_nop,
4116 	.pad_ib = amdgpu_ring_generic_pad_ib,
4117 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4118 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4119 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4120 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4121 };
4122 
4123 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4124 {
4125 	int i, j, num_xcc;
4126 
4127 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4128 	for (i = 0; i < num_xcc; i++) {
4129 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4130 
4131 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4132 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4133 					= &gfx_v9_4_3_ring_funcs_compute;
4134 	}
4135 }
4136 
4137 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4138 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4139 	.process = gfx_v9_4_3_eop_irq,
4140 };
4141 
4142 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4143 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4144 	.process = gfx_v9_4_3_priv_reg_irq,
4145 };
4146 
4147 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4148 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4149 	.process = gfx_v9_4_3_priv_inst_irq,
4150 };
4151 
4152 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4153 {
4154 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4155 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4156 
4157 	adev->gfx.priv_reg_irq.num_types = 1;
4158 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4159 
4160 	adev->gfx.priv_inst_irq.num_types = 1;
4161 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4162 }
4163 
4164 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4165 {
4166 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4167 }
4168 
4169 
4170 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4171 {
4172 	/* init asic gds info */
4173 	switch (adev->ip_versions[GC_HWIP][0]) {
4174 	case IP_VERSION(9, 4, 3):
4175 		/* 9.4.3 removed all the GDS internal memory;
4176 		 * only GWS opcodes are supported in the kernel,
4177 		 * e.g. barrier and semaphore */
4178 		adev->gds.gds_size = 0;
4179 		break;
4180 	default:
4181 		adev->gds.gds_size = 0x10000;
4182 		break;
4183 	}
4184 
4185 	switch (adev->ip_versions[GC_HWIP][0]) {
4186 	case IP_VERSION(9, 4, 3):
4187 		/* deprecated for 9.4.3, not used at all */
4188 		adev->gds.gds_compute_max_wave_id = 0;
4189 		break;
4190 	default:
4191 		/* this really depends on the chip */
4192 		adev->gds.gds_compute_max_wave_id = 0x7ff;
4193 		break;
4194 	}
4195 
4196 	adev->gds.gws_size = 64;
4197 	adev->gds.oa_size = 16;
4198 }
4199 
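/* Program the user-requested CU disable mask for the currently selected
 * shader array (the caller selects the SE/SH via GRBM first).
 */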
4200 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4201 						 u32 bitmap)
4202 {
4203 	u32 data;
4204 
4205 	if (!bitmap)
4206 		return;
4207 
4208 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4209 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4210 
4211 	WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
4212 }
4213 
4214 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
4215 {
4216 	u32 data, mask;
4217 
4218 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
4219 	data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
4220 
4221 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4222 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4223 
4224 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4225 
4226 	return (~data) & mask;
4227 }
4228 
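/*
 * Harvest CU topology: apply the per-SA user disable masks, read back the
 * active CU bitmap for every SE/SH, and fill in cu_info (active CU count,
 * always-on mask and per-SA bitmaps).
 */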
4229 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4230 				 struct amdgpu_cu_info *cu_info)
4231 {
4232 	int i, j, k, counter, active_cu_number = 0;
4233 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4234 	unsigned disable_masks[4 * 4];
4235 
4236 	if (!adev || !cu_info)
4237 		return -EINVAL;
4238 
4239 	/*
4240 	 * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
4241 	 */
4242 	if (adev->gfx.config.max_shader_engines *
4243 		adev->gfx.config.max_sh_per_se > 16)
4244 		return -EINVAL;
4245 
4246 	amdgpu_gfx_parse_disable_cu(disable_masks,
4247 				    adev->gfx.config.max_shader_engines,
4248 				    adev->gfx.config.max_sh_per_se);
4249 
4250 	mutex_lock(&adev->grbm_idx_mutex);
4251 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4252 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4253 			mask = 1;
4254 			ao_bitmap = 0;
4255 			counter = 0;
4256 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
4257 			gfx_v9_4_3_set_user_cu_inactive_bitmap(
4258 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
4259 			bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
4260 
4261 			/*
4262 			 * The bitmap (and ao_cu_bitmap) in the cu_info structure
4263 			 * is a 4x4 array, which suits Vega ASICs and their 4*2
4264 			 * SE/SH layout.
4265 			 * For Arcturus, however, the SE/SH layout changed to 8*1.
4266 			 * To minimize the impact, we map it onto the current
4267 			 * bitmap array as below:
4268 			 *    SE4,SH0 --> bitmap[0][1]
4269 			 *    SE5,SH0 --> bitmap[1][1]
4270 			 *    SE6,SH0 --> bitmap[2][1]
4271 			 *    SE7,SH0 --> bitmap[3][1]
4272 			 */
4273 			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
4274 
4275 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4276 				if (bitmap & mask) {
4277 					if (counter < adev->gfx.config.max_cu_per_sh)
4278 						ao_bitmap |= mask;
4279 					counter++;
4280 				}
4281 				mask <<= 1;
4282 			}
4283 			active_cu_number += counter;
4284 			if (i < 2 && j < 2)
4285 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4286 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
4287 		}
4288 	}
4289 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4290 				    0);
4291 	mutex_unlock(&adev->grbm_idx_mutex);
4292 
4293 	cu_info->number = active_cu_number;
4294 	cu_info->ao_cu_mask = ao_cu_mask;
4295 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4296 
4297 	return 0;
4298 }
4299 
4300 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4301 	.type = AMD_IP_BLOCK_TYPE_GFX,
4302 	.major = 9,
4303 	.minor = 4,
4304 	.rev = 0,
4305 	.funcs = &gfx_v9_4_3_ip_funcs,
4306 };
4307 
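/*
 * XCP (partition) callbacks: resume re-initializes constants, RLC and CP
 * only on the XCC instances named in inst_mask; suspend tears the same
 * instances down.
 */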
4308 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
4309 {
4310 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4311 	uint32_t tmp_mask;
4312 	int i, r;
4313 
4314 	/* TODO: Initialize golden regs */
4315 	/* gfx_v9_4_3_init_golden_registers(adev); */
4316 
4317 	tmp_mask = inst_mask;
4318 	for_each_inst(i, tmp_mask)
4319 		gfx_v9_4_3_xcc_constants_init(adev, i);
4320 
4321 	if (!amdgpu_sriov_vf(adev)) {
4322 		tmp_mask = inst_mask;
4323 		for_each_inst(i, tmp_mask) {
4324 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4325 			if (r)
4326 				return r;
4327 		}
4328 	}
4329 
4330 	tmp_mask = inst_mask;
4331 	for_each_inst(i, tmp_mask) {
4332 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4333 		if (r)
4334 			return r;
4335 	}
4336 
4337 	return 0;
4338 }
4339 
4340 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
4341 {
4342 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4343 	int i;
4344 
4345 	for_each_inst(i, inst_mask)
4346 		gfx_v9_4_3_xcc_fini(adev, i);
4347 
4348 	return 0;
4349 }
4350 
4351 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
4352 	.suspend = &gfx_v9_4_3_xcp_suspend,
4353 	.resume = &gfx_v9_4_3_xcp_resume
4354 };
4355 
4356 struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
4357 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
4358 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
4359 	.query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
4360 	.reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
4361 };
4362 
4363 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
4364 	.ras_block = {
4365 		.hw_ops = &gfx_v9_4_3_ras_ops,
4366 	},
4367 };
4368