1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include "amdgpu.h"
25 #include "gfxhub_v2_0.h"
26 
27 #include "gc/gc_10_1_0_offset.h"
28 #include "gc/gc_10_1_0_sh_mask.h"
29 #include "gc/gc_10_1_0_default.h"
30 #include "navi10_enum.h"
31 
32 #include "soc15_common.h"
33 
34 u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
35 {
36 	u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
37 
38 	base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
39 	base <<= 24;
40 
41 	return base;
42 }
43 
44 u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
45 {
46 	return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
47 }
48 
49 void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
50 				uint64_t page_table_base)
51 {
52 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
53 
54 	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
55 			    hub->ctx_addr_distance * vmid,
56 			    lower_32_bits(page_table_base));
57 
58 	WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
59 			    hub->ctx_addr_distance * vmid,
60 			    upper_32_bits(page_table_base));
61 }
62 
63 static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
64 {
65 	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
66 
67 	gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
68 
69 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
70 		     (u32)(adev->gmc.gart_start >> 12));
71 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
72 		     (u32)(adev->gmc.gart_start >> 44));
73 
74 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
75 		     (u32)(adev->gmc.gart_end >> 12));
76 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
77 		     (u32)(adev->gmc.gart_end >> 44));
78 }
79 
/*
 * Program the system aperture (VRAM window visible to the hub), the
 * default page used for out-of-aperture accesses, and the protection
 * fault default address.
 */
static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * the new L1 policy will block SRIOV guest from writing
		 * these regs, and they will be programed at host.
		 * so skip programing these regs.
		 */
		/* Disable AGP. */
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);

		/* Program the system aperture low logical page number. */
		/* Aperture bounds are programmed in 256KB units (>> 18). */
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     adev->gmc.vram_start >> 18);
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     adev->gmc.vram_end >> 18);

		/* Set default page address. */
		/* MC address of the scratch page: GPU VRAM offset plus the
		 * hub's view of where VRAM begins. */
		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
			+ adev->vm_manager.vram_base_offset;
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));
	}

	/* Program "protection fault". */
	/* Faulting translations are redirected to the dummy page. */
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
		       ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
119 
120 
/* Program the GFX hub L1 TLB control register. */
static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	/* NOTE(review): SYSTEM_ACCESS_MODE 3 presumably means system-aperture
	 * accesses pass through untranslated — confirm against register spec. */
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	/* Don't fault on accesses that miss the system aperture. */
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}
140 
/* Program the GFX hub VM L2 cache control registers (GCVM_L2_CNTL..CNTL5). */
static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These regs are not accessible for VF, PF will program these in SRIOV */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
			    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);

	/* Flush TLBs and L2 so stale translations can't survive the setup. */
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);

	/* Start from the hardware default, then pick bank select and big-K
	 * fragment size depending on whether the GMC translates one level
	 * further (larger page-table block sizes). */
	tmp = mmGCVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);

	/* PDE/PTE requests from the VMC tap go through translation. */
	tmp = mmGCVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);

	tmp = mmGCVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp);
}
189 
190 static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
191 {
192 	uint32_t tmp;
193 
194 	tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
195 	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
196 	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
197 	tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
198 			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
199 	WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
200 }
201 
202 static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
203 {
204 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
205 		     0xFFFFFFFF);
206 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
207 		     0x0000000F);
208 
209 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
210 		     0);
211 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
212 		     0);
213 
214 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
215 	WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
216 
217 }
218 
219 static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
220 {
221 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
222 	int i;
223 	uint32_t tmp;
224 
225 	for (i = 0; i <= 14; i++) {
226 		tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i);
227 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
228 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
229 				    adev->vm_manager.num_level);
230 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
231 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
232 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
233 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
234 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
235 				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
236 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
237 				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
238 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
239 				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
240 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
241 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
242 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
243 				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
244 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
245 				PAGE_TABLE_BLOCK_SIZE,
246 				adev->vm_manager.block_size - 9);
247 		/* Send no-retry XNACK on fault to suppress VM fault storm. */
248 		tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
249 				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
250 				    !amdgpu_noretry);
251 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
252 				    i * hub->ctx_distance, tmp);
253 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
254 				    i * hub->ctx_addr_distance, 0);
255 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
256 				    i * hub->ctx_addr_distance, 0);
257 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
258 				    i * hub->ctx_addr_distance,
259 				    lower_32_bits(adev->vm_manager.max_pfn - 1));
260 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
261 				    i * hub->ctx_addr_distance,
262 				    upper_32_bits(adev->vm_manager.max_pfn - 1));
263 	}
264 }
265 
266 static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
267 {
268 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
269 	unsigned i;
270 
271 	for (i = 0 ; i < 18; ++i) {
272 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
273 				    i * hub->eng_addr_distance, 0xffffffff);
274 		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
275 				    i * hub->eng_addr_distance, 0x1f);
276 	}
277 }
278 
/*
 * Enable GART on the GFX hub: program the aperture, TLB, L2 cache,
 * system domain, identity aperture, per-VMID contexts and invalidation
 * engines, in that fixed order.
 *
 * Returns 0 (cannot currently fail).
 */
int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	gfxhub_v2_0_init_gart_aperture_regs(adev);
	gfxhub_v2_0_init_system_aperture_regs(adev);
	gfxhub_v2_0_init_tlb_regs(adev);
	gfxhub_v2_0_init_cache_regs(adev);

	gfxhub_v2_0_enable_system_domain(adev);
	gfxhub_v2_0_disable_identity_aperture(adev);
	gfxhub_v2_0_setup_vmid_config(adev);
	gfxhub_v2_0_program_invalidation(adev);

	return 0;
}
294 
/* Disable GART on the GFX hub: turn off all VM contexts, the L1 TLB
 * and (outside SRIOV) the L2 cache. */
void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);

	/* L2 regs are host-programmed under SRIOV (see the enable path). */
	if (!amdgpu_sriov_vf(adev)) {
		/* Setup L2 cache */
		WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
	}
}
319 
/**
 * gfxhub_v2_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page; false makes the
 *         hardware crash on no-retry/retry faults instead of hiding them
 */
void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;
	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		/* Without redirection, crash on faults so they are never
		 * silently swallowed. */
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
				CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
362 
/*
 * Cache the GFX hub register offsets and per-context/per-engine register
 * strides in adev->vmhub[AMDGPU_GFXHUB_0] so common GMC code can drive
 * this hub without knowing the gfx10 register map.
 */
void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	/* context0 page table base registers */
	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	/* invalidation engine 0 registers */
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
	/* context control and fault status/control registers */
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);

	/* register strides between consecutive contexts/engines, derived
	 * from the distance between the 0 and 1 instances */
	hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ -
		mmGCVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
394