/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v9_4.h"

#include "mmhub/mmhub_9_4_1_offset.h"
#include "mmhub/mmhub_9_4_1_sh_mask.h"
#include "mmhub/mmhub_9_4_1_default.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"

#define MMHUB_NUM_INSTANCES			2
#define MMHUB_INSTANCE_REGISTER_OFFSET		0x3000

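/**
 * mmhub_v9_4_get_fb_location - fetch the framebuffer aperture from the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the FB base/top registers (the fields hold bits 47:24 of the
 * address, hence the shift by 24), caches the range in adev->gmc and
 * returns the base.
 */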
u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
	/* The base should be the same between the two MMHUBs on Arcturus,
	 * so read it from the first one.
	 */
	u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);

	base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

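/* Program the page table base address of one VMID on one MMHUB instance. */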
static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
				uint32_t vmid, uint64_t value)
{
	/* register distance between mmVML2VC0_VM_CONTEXT0_* and
	 * mmVML2VC0_VM_CONTEXT1_*
	 */
	int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    lower_32_bits(value));

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    upper_32_bits(value));
}

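/*
 * Point VM context0 of the given MMHUB instance at the GART page table
 * and program its logical start/end addresses (in 4K pages, split across
 * LO32/HI32 register pairs).
 */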
static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
					       int hubid)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_end >> 44));
}

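/**
 * mmhub_v9_4_setup_vm_pt_regs - program the page table base for a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context id to program
 * @page_table_base: GPU address of the page directory
 *
 * Mirrors the same page table base across both MMHUB instances.
 */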
void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
		mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
				page_table_base);
}

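/*
 * Program the AGP aperture, the system aperture, the default page and the
 * protection fault addresses. Apart from the AGP BAR, these registers are
 * left to the host when running as an SR-IOV VF, hence the
 * amdgpu_sriov_vf() check.
 */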
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
					         int hubid)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR */
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    adev->gmc.agp_end >> 24);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    adev->gmc.agp_start >> 24);

	if (!amdgpu_sriov_vf(adev)) {
		/* Program the system aperture low logical page number. */
		WREG32_SOC15_OFFSET(
			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
		WREG32_SOC15_OFFSET(
			MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* Set default page address. */
		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
			adev->vm_manager.vram_base_offset;
		WREG32_SOC15_OFFSET(
			MMHUB, 0,
			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)(value >> 12));
		WREG32_SOC15_OFFSET(
			MMHUB, 0,
			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15_OFFSET(
			MMHUB, 0,
			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15_OFFSET(
			MMHUB, 0,
			mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15_OFFSET(
			MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
				    tmp);
	}
}

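/* Set up L1 TLB control for one MMHUB instance. */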
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
			   mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			   hubid * MMHUB_INSTANCE_REGISTER_OFFSET);

	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ATC_EN, 1);

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

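/* Set up the VM L2 cache controls (CNTL..CNTL4) for one MMHUB instance. */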
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
		     hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
			    INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
			    INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

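/*
 * Enable VM context0, which the driver uses for the system domain (GART);
 * depth 0 selects a single-level page table.
 */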
static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
					    int hubid)
{
	uint32_t tmp;

	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}

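/*
 * Disable the context1 identity aperture by programming its low address
 * above its high address, so no request can ever match the range.
 */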
static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
						 int hubid)
{
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0xFFFFFFFF);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);

	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);

	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
}

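/* Set up VM contexts 1-15, which are handed out to user VMs. */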
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;
	int i;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
				    tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i * 2,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}

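/* Open up the full address range on all 18 VM invalidation engines. */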
static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
					    int hubid)
{
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
				0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
				0x1f);
	}
}

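/**
 * mmhub_v9_4_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Programs both MMHUB instances: apertures, TLB and L2 setup, VM contexts
 * and invalidation engines. Registers owned by the host under SR-IOV are
 * skipped for VFs.
 */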
int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		/* GART Enable. */
		mmhub_v9_4_init_gart_aperture_regs(adev, i);
		mmhub_v9_4_init_system_aperture_regs(adev, i);
		mmhub_v9_4_init_tlb_regs(adev, i);
		if (!amdgpu_sriov_vf(adev))
			mmhub_v9_4_init_cache_regs(adev, i);

		mmhub_v9_4_enable_system_domain(adev, i);
		if (!amdgpu_sriov_vf(adev))
			mmhub_v9_4_disable_identity_aperture(adev, i);
		mmhub_v9_4_setup_vmid_config(adev, i);
		mmhub_v9_4_program_invalidation(adev, i);
	}

	return 0;
}

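/**
 * mmhub_v9_4_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, the L1 TLB and the L2 cache on both instances.
 */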
void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i, j;

	for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, 0,
					    mmVML2VC0_VM_CONTEXT0_CNTL,
					    j * MMHUB_INSTANCE_REGISTER_OFFSET +
					    i, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
				   mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				   j * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp,
				    VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
					  j * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
				    ENABLE_L2_CACHE, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	}
}

/**
 * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
					  mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					  i * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp,
			    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp,
					    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp,
					    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
	}
}

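/*
 * Fill in the per-instance amdgpu_vmhub register offsets so that common
 * GMC code can address either MMHUB without knowing the instance stride.
 */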
void mmhub_v9_4_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
		{&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		hub[i]->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_CONTEXT0_CNTL) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
				    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
				    i * MMHUB_INSTANCE_REGISTER_OFFSET;
	}
}

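/*
 * Toggle medium grain clock gating in the ATC L2 and the DAGB clients of
 * both MMHUB instances.
 */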
static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1;
	int i, j;
	int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmATCL2_0_ATC_L2_MISC_CG,
					i * MMHUB_INSTANCE_REGISTER_OFFSET);

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
		else
			data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;

		if (def != data)
			WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
				i * MMHUB_INSTANCE_REGISTER_OFFSET, data);

		for (j = 0; j < 5; j++) {
			def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmDAGB0_CNTL_MISC2,
					i * MMHUB_INSTANCE_REGISTER_OFFSET +
					j * dist);
			if (enable &&
			    (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
				data1 &=
				    ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
			} else {
				data1 |=
				    (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
			}

			if (def1 != data1)
				WREG32_SOC15_OFFSET(MMHUB, 0,
					mmDAGB0_CNTL_MISC2,
					i * MMHUB_INSTANCE_REGISTER_OFFSET +
					j * dist, data1);

			/* the second instance has one DAGB less */
			if (i == 1 && j == 3)
				break;
		}
	}
}

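/* Toggle ATC L2 memory light sleep on both MMHUB instances. */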
static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmATCL2_0_ATC_L2_MISC_CG,
					i * MMHUB_INSTANCE_REGISTER_OFFSET);

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
		else
			data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

		if (def != data)
			WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
				i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
	}
}

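/**
 * mmhub_v9_4_set_clockgating - set MMHUB clock gating state
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 */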
int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
			       enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		mmhub_v9_4_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v9_4_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

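/**
 * mmhub_v9_4_get_clockgating - report the current MMHUB clock gating flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMD_CG_SUPPORT_* flags to update
 */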
void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
	int data, data1;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);

	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

	if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

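/*
 * EDC counter fields per MMEA sub-block: each entry names the SEC (or SED)
 * and DED count fields of one EDC counter register; a zero mask/shift pair
 * means the register has no such field for that sub-block.
 */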
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
	}
};

static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
};

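/*
 * Accumulate the SEC and DED counts contained in one EDC counter register
 * value, matching it against the field table above.
 */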
static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
	uint32_t i;
	uint32_t sec_cnt, ded_cnt;

	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
		if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
			continue;

		sec_cnt = (value &
				mmhub_v9_4_ras_fields[i].sec_count_mask) >>
				mmhub_v9_4_ras_fields[i].sec_count_shift;
		if (sec_cnt) {
			DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
				mmhub_v9_4_ras_fields[i].name,
				sec_cnt);
			*sec_count += sec_cnt;
		}

		ded_cnt = (value &
				mmhub_v9_4_ras_fields[i].ded_count_mask) >>
				mmhub_v9_4_ras_fields[i].ded_count_shift;
		if (ded_cnt) {
			DRM_INFO("MMHUB SubBlock %s, DED %d\n",
				mmhub_v9_4_ras_fields[i].name,
				ded_cnt);
			*ded_count += ded_cnt;
		}
	}

	return 0;
}

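/*
 * Walk all EDC counter registers and report the totals as correctable
 * (SEC) and uncorrectable (DED) error counts.
 */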
static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t sec_count = 0, ded_count = 0;
	uint32_t i;
	uint32_t reg_value;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
		reg_value =
			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
		if (reg_value)
			mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
				reg_value, &sec_count, &ded_count);
	}

	err_data->ce_count += sec_count;
	err_data->ue_count += ded_count;
}

const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
};