xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

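/**
 * mmhub_v1_0_get_fb_location - return the MMHUB view of the framebuffer base
 *
 * @adev: amdgpu_device pointer
 *
 * The FB_BASE field of MC_VM_FB_LOCATION_BASE holds the base address in
 * 16 MB units, hence the shift left by 24 to get a byte address.
 */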
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

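/**
 * mmhub_v1_0_gart_enable - enable GART translation through the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the system aperture and page table base registers, sets up the
 * L1 TLB and the L2 page table cache, and enables VM context 0 (the kernel
 * GART) plus contexts 1-15 (per-process VMs) on this hub.
 */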
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	u64 addr;
	u32 i;

	/* Program MC: update the system aperture configuration. */
	DRM_INFO("%s -- in\n", __func__);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
				(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
				(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for a VF, because they
		 * are VF copy registers that the VBIOS post does not program;
		 * under SRIOV the driver has to program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_ACCESS_MODE,
				3);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_APERTURE_UNMAPPED_ACCESS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ECO_BITS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				MTYPE,
				MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ATC_EN,
				1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				ENABLE_L2_FRAGMENT_PROCESSING,
				0);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				L2_PDE0_CACHE_TAG_GENERATION_MODE,
				0); /* XXX for emulation; refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				CONTEXT1_IDENTITY_ACCESS_MODE,
				1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				IDENTITY_MODE_FRAGMENT_SIZE,
				0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL,
			    0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);

	/* Setup context0: the kernel GART covers [gtt_start, gtt_end]. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

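	/*
	 * The GART table must be 4K aligned and lie within the 48-bit
	 * address space; the BUG_ON below enforces that, and bit 0 of the
	 * programmed base address doubles as the "valid" bit.
	 */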
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /* valid bit */

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)(adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
			    1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	addr = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = RREG32(addr);

	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);

	tmp = RREG32(addr);

	/* Disable identity aperture. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0xFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

	/* Setup contexts 1-15, used for per-process page tables. */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL)
				+ i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				amdgpu_vm_block_size - 9);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i * 2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i * 2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i * 2,
			adev->vm_manager.max_pfn - 1);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
			mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i * 2, 0);
	}

	return 0;
}

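/**
 * mmhub_v1_0_gart_disable - disable GART translation through the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Turns off all 16 VM contexts, the L1 TLB and the L2 page table cache
 * for this hub.
 */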
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

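/*
 * Build the VM_INVALIDATE_ENG0_REQ value for a legacy (FLUSH_TYPE 0)
 * invalidation of @vm_id: it flushes the L1 TLB PTEs and every cached L2
 * PTE/PDE level for that VMID without clearing the protection fault
 * status.  Callers issue it through the vm_inv_eng0_req/ack offsets
 * exported from sw_init below.
 */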
static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

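/*
 * Mask of the VM_CONTEXT1_CNTL bits that raise an interrupt on the
 * corresponding protection fault; handed to common GMC code through the
 * amdgpu_vmhub callback installed in sw_init below.
 */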
static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
{
	return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
}

static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}

static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}

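/*
 * sw_init only records the MMHUB register offsets and helper callbacks in
 * the shared amdgpu_vmhub structure, so the common GMC/VM code can drive
 * this hub without knowing its register layout.
 */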
static int mmhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
	hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;

	return 0;
}

static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

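/*
 * hw_init programs the address range of all 18 VM invalidation engines to
 * cover the whole VM address space, so any engine can be used to flush
 * any mapping.
 */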
static int mmhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}

static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}

static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}

static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

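/*
 * Medium grain clock gating (MGCG): when enabling, set the ATC L2 clock
 * gating enable bit and clear the per-client CG disable bits in both DAGB
 * instances; when disabling, do the reverse.  Registers are only written
 * back when the value actually changes.
 */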
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2, data2;

	def  = data  = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
	def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);

	if (def1 != data1)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);

	if (def2 != data2)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
}

static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
}

static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						  bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

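/*
 * Clock gating entry point: on Vega10 this toggles MMHUB/ATHUB medium
 * grain clock gating and memory light sleep according to the requested
 * gating state; other ASICs are left untouched.
 */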
static int mmhub_v1_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		mmhub_v1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		athub_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		mmhub_v1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		athub_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static int mmhub_v1_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
	.name = "mmhub_v1_0",
	.early_init = mmhub_v1_0_early_init,
	.late_init = mmhub_v1_0_late_init,
	.sw_init = mmhub_v1_0_sw_init,
	.sw_fini = mmhub_v1_0_sw_fini,
	.hw_init = mmhub_v1_0_hw_init,
	.hw_fini = mmhub_v1_0_hw_fini,
	.suspend = mmhub_v1_0_suspend,
	.resume = mmhub_v1_0_resume,
	.is_idle = mmhub_v1_0_is_idle,
	.wait_for_idle = mmhub_v1_0_wait_for_idle,
	.soft_reset = mmhub_v1_0_soft_reset,
	.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
	.set_powergating_state = mmhub_v1_0_set_powergating_state,
	.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version mmhub_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_MMHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &mmhub_v1_0_ip_funcs,
};