1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "amdgpu_ras.h"
25 #include "mmhub_v9_4.h"
26 
27 #include "mmhub/mmhub_9_4_1_offset.h"
28 #include "mmhub/mmhub_9_4_1_sh_mask.h"
29 #include "mmhub/mmhub_9_4_1_default.h"
30 #include "athub/athub_1_0_offset.h"
31 #include "athub/athub_1_0_sh_mask.h"
32 #include "vega10_enum.h"
33 #include "soc15.h"
34 #include "soc15_common.h"
35 
36 #define MMHUB_NUM_INSTANCES			2
37 #define MMHUB_INSTANCE_REGISTER_OFFSET		0x3000
38 
/* Read the framebuffer aperture window from MMHUB and cache it in
 * adev->gmc (fb_start/fb_end); returns the FB base address in bytes.
 */
u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
	/* The base should be same b/t 2 mmhubs on Arcturus. Read one here. */
	u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);

	/* The registers hold the address in 16MB units (bits above 24);
	 * mask the field and shift back up to a byte address.
	 */
	base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	/* Cache the window so later aperture programming can reuse it. */
	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}
56 
/* Program the 64-bit page table base address of @vmid on a single MMHUB
 * instance (@hubid). Instances are MMHUB_INSTANCE_REGISTER_OFFSET register
 * offsets apart; per-VMID registers are @dist offsets apart.
 */
static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
				uint32_t vmid, uint64_t value)
{
	/* two registers distance between mmVML2VC0_VM_CONTEXT0_* to
	 * mmVML2VC0_VM_CONTEXT1_*
	 */
	int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
			- mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    lower_32_bits(value));

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    upper_32_bits(value));

}
77 
/* Program VM context 0 of one MMHUB instance with the GART page table
 * base and the GART start/end window (addresses are in 4K pages, split
 * into LO32 = bits 43:12 and HI32 = bits above 44).
 */
static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
					       int hubid)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	/* Context 0 is reserved for the GART page table. */
	mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->gmc.gart_end >> 44));
}
103 
104 void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
105 				uint64_t page_table_base)
106 {
107 	int i;
108 
109 	for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
110 		mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
111 				page_table_base);
112 }
113 
/* Program the AGP and system apertures plus the default/fault page
 * addresses for one MMHUB instance.
 */
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
					         int hubid)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR (AGP_TOP/BOT are in 16MB units). */
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    adev->gmc.agp_end >> 24);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    adev->gmc.agp_start >> 24);

	/* Program the system aperture low logical page number.
	 * The aperture must cover both the FB and AGP windows (256KB units).
	 */
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address (MC address of the VRAM scratch page). */
	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15_OFFSET(MMHUB, 0,
			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)(value >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			(u32)(value >> 44));

	/* Program "protection fault": faults are redirected to the dummy
	 * page.
	 */
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
			    (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
				  mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
171 
/* Enable and configure the L1 TLB for one MMHUB instance. */
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;

	/* Setup TLB control: read-modify-write so reserved bits keep their
	 * reset values.
	 */
	tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
			   mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			   hubid * MMHUB_INSTANCE_REGISTER_OFFSET);

	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    ATC_EN, 1);

	WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
199 
/* Enable and configure the VM L2 cache (CNTL..CNTL4) for one MMHUB
 * instance, including the big-K fragment size which depends on whether
 * further translation is enabled.
 */
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
			    IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
		     hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	/* Invalidate all TLBs/L2 so stale translations cannot survive. */
	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
			    INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
			    INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	/* CNTL3 starts from the hardware default rather than a read-back. */
	tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

	tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
253 
/* Enable VM context 0 (the system/GART domain) on one MMHUB instance.
 * Depth 0 means a flat, single-level page table; retry faults disabled.
 */
static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
					    int hubid)
{
	uint32_t tmp;

	tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
				  hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
268 
/* Disable the context 1 identity aperture on one MMHUB instance by
 * programming an empty range (low addr = max, high addr = 0) and a zero
 * physical offset.
 */
static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
						 int hubid)
{
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0XFFFFFFFF);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);

	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);

	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	WREG32_SOC15_OFFSET(MMHUB, 0,
		    mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		    hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
}
293 
/* Configure VM contexts 1..15 (user VMIDs) on one MMHUB instance:
 * enable the context, set page table depth/block size, enable all
 * protection fault defaults, and program the per-context address range.
 * CNTL registers are 1 offset apart per context; the START/END address
 * registers come in LO32/HI32 pairs, hence the i*2 stride.
 */
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
	uint32_t tmp;
	int i;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		/* block_size is log2 of pages per block; the hardware field
		 * counts from the 512-entry (2^9) base, hence the -9.
		 */
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
				    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i,
				    tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
			    hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
348 
/* Open the address range of all 18 invalidation engines on one MMHUB
 * instance to the full address space (LO32/HI32 pairs, hence 2*i).
 */
static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
					    int hubid)
{
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
				0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i,
				0x1f);
	}
}
365 
/* Bring up the GART on every MMHUB instance: program the FB location
 * (SRIOV only), GART aperture, system aperture, TLB, L2 cache, system
 * domain, identity aperture, user VMID configs and invalidation engines.
 * Returns 0 on success.
 */
int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		if (amdgpu_sriov_vf(adev)) {
			/*
			 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because
			 * they are VF copy registers so vbios post doesn't
			 * program them, for SRIOV driver need to program them
			 */
			WREG32_SOC15_OFFSET(MMHUB, 0,
				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE,
				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
				     adev->gmc.vram_start >> 24);
			WREG32_SOC15_OFFSET(MMHUB, 0,
				     mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP,
				     i * MMHUB_INSTANCE_REGISTER_OFFSET,
				     adev->gmc.vram_end >> 24);
		}

		/* GART Enable. */
		mmhub_v9_4_init_gart_aperture_regs(adev, i);
		mmhub_v9_4_init_system_aperture_regs(adev, i);
		mmhub_v9_4_init_tlb_regs(adev, i);
		mmhub_v9_4_init_cache_regs(adev, i);

		mmhub_v9_4_enable_system_domain(adev, i);
		mmhub_v9_4_disable_identity_aperture(adev, i);
		mmhub_v9_4_setup_vmid_config(adev, i);
		mmhub_v9_4_program_invalidation(adev, i);
	}

	return 0;
}
401 
/* Tear down the GART on every MMHUB instance: disable all 16 VM
 * contexts, turn off the L1 TLB and the L2 cache.
 */
void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i, j;

	for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, 0,
					    mmVML2VC0_VM_CONTEXT0_CNTL,
					    j * MMHUB_INSTANCE_REGISTER_OFFSET +
					    i, 0);

		/* Setup TLB control: disable L1 TLB and the advanced driver
		 * model.
		 */
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
				   mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				   j * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp,
				    VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);

		/* Setup L2 cache: disable it and clear CNTL3. */
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
					  j * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
				    ENABLE_L2_CACHE, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
				    j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
	}
}
439 
440 /**
441  * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
442  *
443  * @adev: amdgpu_device pointer
444  * @value: true redirects VM faults to the default page
445  */
void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;
	int i;

	/* Apply the same fault policy to both MMHUB instances. */
	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
					  mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					  i * MMHUB_INSTANCE_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp,
			    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		/* When faults are not redirected to the default page, make
		 * them fatal so problems are caught instead of masked.
		 */
		if (!value) {
			tmp = REG_SET_FIELD(tmp,
					    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp,
					    VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
				    i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
	}
}
503 
/* Fill in the per-instance amdgpu_vmhub register offset tables for both
 * MMHUBs so the generic GMC code can address either hub.
 */
void mmhub_v9_4_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
		{&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		hub[i]->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(MMHUB, 0,
			    mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
			    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, 0,
					 mmVML2VC0_VM_CONTEXT0_CNTL) +
					 i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
				    i * MMHUB_INSTANCE_REGISTER_OFFSET;
		hub[i]->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(MMHUB, 0,
				    mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
				    i * MMHUB_INSTANCE_REGISTER_OFFSET;
	}
}
545 
/* Enable/disable medium-grain clock gating: toggles the ATC L2 CG enable
 * bit and the DAGB CG-disable bits for every DAGB unit on both hubs.
 * Registers are only written back when the value actually changed.
 */
static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1;
	int i, j;
	int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmATCL2_0_ATC_L2_MISC_CG,
					i * MMHUB_INSTANCE_REGISTER_OFFSET);

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
		else
			data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;

		if (def != data)
			WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
				i * MMHUB_INSTANCE_REGISTER_OFFSET, data);

		/* The DAGB CNTL_MISC2 bits are *disable* bits: clear them to
		 * enable clock gating, set them to disable it.
		 */
		for (j = 0; j < 5; j++) {
			def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmDAGB0_CNTL_MISC2,
					i * MMHUB_INSTANCE_REGISTER_OFFSET +
					j * dist);
			if (enable &&
			    (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
				data1 &=
				    ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
			} else {
				data1 |=
				    (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
				    DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
			}

			if (def1 != data1)
				WREG32_SOC15_OFFSET(MMHUB, 0,
					mmDAGB0_CNTL_MISC2,
					i * MMHUB_INSTANCE_REGISTER_OFFSET +
					j * dist, data1);

			/* NOTE(review): the second hub stops after 4 DAGBs —
			 * presumably it has fewer units; confirm against the
			 * register spec.
			 */
			if (i == 1 && j == 3)
				break;
		}
	}
}
602 
/* Enable/disable memory light sleep via the ATC L2 MEM_LS_ENABLE bit on
 * both hubs; only writes the register when the value changed.
 */
static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;
	int i;

	for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
		def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
					mmATCL2_0_ATC_L2_MISC_CG,
					i * MMHUB_INSTANCE_REGISTER_OFFSET);

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
		else
			data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

		if (def != data)
			WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
				i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
	}
}
624 
625 int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
626 			       enum amd_clockgating_state state)
627 {
628 	if (amdgpu_sriov_vf(adev))
629 		return 0;
630 
631 	switch (adev->asic_type) {
632 	case CHIP_ARCTURUS:
633 		mmhub_v9_4_update_medium_grain_clock_gating(adev,
634 				state == AMD_CG_STATE_GATE ? true : false);
635 		mmhub_v9_4_update_medium_grain_light_sleep(adev,
636 				state == AMD_CG_STATE_GATE ? true : false);
637 		break;
638 	default:
639 		break;
640 	}
641 
642 	return 0;
643 }
644 
645 void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
646 {
647 	int data, data1;
648 
649 	if (amdgpu_sriov_vf(adev))
650 		*flags = 0;
651 
652 	/* AMD_CG_SUPPORT_MC_MGCG */
653 	data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
654 
655 	data1 = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
656 
657 	if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
658 	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
659 		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
660 		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
661 		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
662 		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
663 		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
664 		*flags |= AMD_CG_SUPPORT_MC_MGCG;
665 
666 	/* AMD_CG_SUPPORT_MC_LS */
667 	if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
668 		*flags |= AMD_CG_SUPPORT_MC_LS;
669 }
670 
/* EDC error-count field map for the MMHUB RAS path: each entry names a
 * sub-block and gives the register plus the SEC (correctable) and DED
 * (uncorrectable) count field masks/shifts. Entries with a zero mask
 * have no counter of that kind in the given register (some sub-blocks
 * split SEC and DED across EDC_CNT/EDC_CNT2/EDC_CNT3).
 */
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
	SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
	SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
	0, 0,
	},
	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
	},
	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
	},
	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
	},
	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
	0, 0,
	SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
	}
};
849 
/* All EDC count registers the RAS query below has to read. */
static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0},
   { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0},
};
858 
859 static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
860 	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
861 {
862 	uint32_t i;
863 	uint32_t sec_cnt, ded_cnt;
864 
865 	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
866 		if(mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
867 			continue;
868 
869 		sec_cnt = (value &
870 				mmhub_v9_4_ras_fields[i].sec_count_mask) >>
871 				mmhub_v9_4_ras_fields[i].sec_count_shift;
872 		if (sec_cnt) {
873 			DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
874 				mmhub_v9_4_ras_fields[i].name,
875 				sec_cnt);
876 			*sec_count += sec_cnt;
877 		}
878 
879 		ded_cnt = (value &
880 				mmhub_v9_4_ras_fields[i].ded_count_mask) >>
881 				mmhub_v9_4_ras_fields[i].ded_count_shift;
882 		if (ded_cnt) {
883 			DRM_INFO("MMHUB SubBlock %s, DED %d\n",
884 				mmhub_v9_4_ras_fields[i].name,
885 				ded_cnt);
886 			*ded_count += ded_cnt;
887 		}
888 	}
889 
890 	return 0;
891 }
892 
893 static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
894 					   void *ras_error_status)
895 {
896 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
897 	uint32_t sec_count = 0, ded_count = 0;
898 	uint32_t i;
899 	uint32_t reg_value;
900 
901 	err_data->ue_count = 0;
902 	err_data->ce_count = 0;
903 
904 	for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
905 		reg_value =
906 			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
907 		if (reg_value)
908 			mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
909 				reg_value, &sec_count, &ded_count);
910 	}
911 
912 	err_data->ce_count += sec_count;
913 	err_data->ue_count += ded_count;
914 }
915 
/* MMHUB 9.4 callbacks exported to the RAS framework. */
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
	.ras_late_init = amdgpu_mmhub_ras_late_init,
	.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
};
920