/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

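/*
 * Return the frame buffer offset of GC instance 0 as a byte address
 * (MC_VM_FB_OFFSET holds the value in 16 MB units).
 */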
static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
}

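/*
 * Program the page table base address for the given vmid on every XCC
 * instance in xcc_mask.
 */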
static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint64_t page_table_base,
					     uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}

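/*
 * Point VMID0 at the GART page table (or at PDB0 when the GART also maps
 * VRAM) and program the address range it translates.
 */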
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
						    uint32_t xcc_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);

	/* If the GART is used for FB translation, the VMID0 page table
	 * covers both VRAM and system memory (GART).
	 */
	for_each_inst(i, xcc_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

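/*
 * Program the AGP and system apertures, the default and fault pages, and
 * disable the FB/AGP apertures when VRAM is accessed through the GART.
 */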
static void
gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
			/* Program the system aperture low logical page number. */
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
				/*
				 * Raven2 has a HW issue that prevents it from using
				 * VRAM above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
				 * workaround, raise the system aperture high address
				 * by one to avoid the VM fault and hardware hang.
				 */
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max((adev->gmc.fb_end >> 18) + 0x1,
						     adev->gmc.agp_end >> 18));
			else
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
					regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

			/* Set default page address. */
			value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
				     (u32)(value >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
				     (u32)(value >> 44));

			/* Program "protection fault". */
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
				     (u32)(adev->dummy_page_addr >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
				     (u32)((u64)adev->dummy_page_addr >> 44));

			tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
					    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
		}

		/* When VRAM is squeezed into the GART aperture, the FB and AGP
		 * apertures are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}
	}
}

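/* Configure the L1 TLB for each XCC instance. */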
static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

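/* Configure the GPUVM L2 cache for each XCC instance. */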
static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
					    uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, Refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
	}
}

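/*
 * Enable VM context 0 (the system/GART domain) with the VMID0 page table
 * depth and block size from adev->gmc.
 */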
static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
	}
}

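/*
 * Disable the CONTEXT1 identity aperture by programming an invalid
 * (low > high) address range.
 */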
static void
gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	int i;

	for_each_inst(i, xcc_mask) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

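/*
 * Configure VM contexts 1-15: page table depth, block size, fault handling
 * and the address range they translate.
 */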
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
					      uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned num_level, block_size;
	uint32_t tmp;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* Send no-retry XNACK on fault to suppress VM fault storm.
			 * On 9.4.2 and 9.4.3, XNACK can be enabled in
			 * the SQ per-process.
			 * Retry faults need to be enabled for that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
					    !adev->gmc.noretry ||
					    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
					    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

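/* Initialize the address range registers of the 18 VM invalidation engines. */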
static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

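/*
 * Enable the GART on the given XCC instances: program the apertures, TLB,
 * L2 cache, VM contexts and invalidation engines.
 */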
static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
				       uint32_t xcc_mask)
{
	int i;

	/*
	 * MC_VM_FB_LOCATION_BASE/TOP are NULL for the VF: they are VF copy
	 * registers, so the VBIOS post does not program them. Under SR-IOV
	 * the driver needs to program them itself.
	 */
	if (amdgpu_sriov_vf(adev)) {
		for_each_inst(i, xcc_mask) {
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
				     adev->gmc.vram_start >> 24);
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
				     adev->gmc.vram_end >> 24);
		}
	}

	/* GART Enable. */
	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);

	gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
	gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
	gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);

	return 0;
}

static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
}

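/* Disable the GART: turn off all VM contexts, the L1 TLB and the L2 cache. */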
static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
					 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
		WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
	}
}

static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
}

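/* Per-XCC implementation of gfxhub_v1_2_set_fault_enable_default(). */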
static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
						     bool value,
						     uint32_t xcc_mask)
{
	u32 tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp,
				VM_L2_PROTECTION_FAULT_CNTL,
				TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_RETRY_FAULT, 1);
		}
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

/**
 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}

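/*
 * Fill in the amdgpu_vmhub register offsets and register strides for each
 * XCC instance.
 */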
static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL -
				regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
				regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
				regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance =
				regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
				regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_init(adev, xcc_mask);
}

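/*
 * Read the XGMI LFB registers to determine the number of physical nodes,
 * this node's id and the per-node segment size.
 */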
static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
		MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes   = 8;
	max_physical_node_id     = 7;

	/* PF_MAX_REGION=0 means xgmi is disabled */
	if (max_region || adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;

		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
					PF_LFB_REGION);

		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = seg_size;
	}

	return 0;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
};

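/*
 * XCP (partition) resume: restore the default fault handling and, on bare
 * metal, re-enable the GART for the instances in inst_mask.
 */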
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);

	return 0;
}

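/*
 * XCP (partition) suspend: disable the GART for the instances in inst_mask
 * on bare metal.
 */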
static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
	.suspend = &gfxhub_v1_2_xcp_suspend,
	.resume = &gfxhub_v1_2_xcp_resume
};