/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_default.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_sh_mask.h"
#include "vega10/ATHUB/athub_1_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

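/**
 * mmhub_v1_0_get_fb_location - read back the MMHUB framebuffer base
 *
 * @adev: amdgpu_device pointer
 *
 * MC_VM_FB_LOCATION_BASE holds the FB base in 16 MB units, so the field
 * value is shifted left by 24 to form a byte address.
 */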
u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

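/**
 * mmhub_v1_0_gart_enable - set up the MMHUB for GART translation
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the system aperture, L1 TLB, L2 cache and VM context registers
 * so that GART (context 0) and the per-process VM contexts are usable.
 */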
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	uint64_t addr;
	u32 i;

	/* Program MC: update the configuration. */
	DRM_INFO("%s -- in\n", __func__);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
				(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
				(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP are NULL for VF, because they
		 * are VF copy registers and the vbios post does not program
		 * them.  Under SRIOV the driver has to program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_ACCESS_MODE,
				3);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_APERTURE_UNMAPPED_ACCESS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ECO_BITS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				MTYPE,
				MTYPE_UC); /* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ATC_EN,
				1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				ENABLE_L2_FRAGMENT_PROCESSING,
				0);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				L2_PDE0_CACHE_TAG_GENERATION_MODE,
				0); /* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				CONTEXT1_IDENTITY_ACCESS_MODE,
				1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				IDENTITY_MODE_FRAGMENT_SIZE,
				0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL,
			    0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);

	/* setup context0 */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

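	/*
	 * Program the context0 page table base: the address must be 4 KB
	 * aligned (bits 47:12), with bit 0 used as the valid bit.
	 */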
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /* valid bit */

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)((u64)adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
			    1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	addr = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = RREG32(addr);

	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Disable identity aperture. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0xFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

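	/*
	 * Set up VM contexts 1-15 for per-process address spaces: page table
	 * depth comes from the VM manager, all fault types fall back to the
	 * default page, and the page table block size is encoded relative to
	 * the minimum 2 MB (512 page) block, hence the "- 9".
	 */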
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL)
				+ i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
			lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
			upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	return 0;
}

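/**
 * mmhub_v1_0_gart_disable - shut down GART translation on the MMHUB
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts and turns off the L1 TLB and L2 cache.
 */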
void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
}

/**
 * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}

static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}

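/*
 * Record the MMHUB register offsets that the common GMC/VM code uses to
 * program page tables and issue VM invalidations on this hub.
 */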
static int mmhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	return 0;
}

static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

static int mmhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

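	/*
	 * Program every invalidation engine's address range to cover the
	 * whole address space.
	 */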
	for (i = 0; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}

static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}

static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}

static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

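/*
 * Medium grain clock gating is controlled through ATC_L2_MISC_CG and the
 * per-DAGB (DAGB0/DAGB1) CNTL_MISC2 clock gating disable bits.
 */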
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data, def1, data1, def2, data2;

	def  = data  = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
	def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);

	if (def1 != data1)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);

	if (def2 != data2)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
}

static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
}

static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						  bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
	else
		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
}

508 
509 static int mmhub_v1_0_set_clockgating_state(void *handle,
510 					enum amd_clockgating_state state)
511 {
512 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
513 
514 	if (amdgpu_sriov_vf(adev))
515 		return 0;
516 
517 	switch (adev->asic_type) {
518 	case CHIP_VEGA10:
519 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
520 				state == AMD_CG_STATE_GATE ? true : false);
521 		athub_update_medium_grain_clock_gating(adev,
522 				state == AMD_CG_STATE_GATE ? true : false);
523 		mmhub_v1_0_update_medium_grain_light_sleep(adev,
524 				state == AMD_CG_STATE_GATE ? true : false);
525 		athub_update_medium_grain_light_sleep(adev,
526 				state == AMD_CG_STATE_GATE ? true : false);
527 		break;
528 	default:
529 		break;
530 	}
531 
532 	return 0;
533 }
534 
535 static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
536 {
537 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
538 	int data;
539 
540 	if (amdgpu_sriov_vf(adev))
541 		*flags = 0;
542 
543 	/* AMD_CG_SUPPORT_MC_MGCG */
544 	data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
545 	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
546 		*flags |= AMD_CG_SUPPORT_MC_MGCG;
547 
548 	/* AMD_CG_SUPPORT_MC_LS */
549 	data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
550 	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
551 		*flags |= AMD_CG_SUPPORT_MC_LS;
552 }
553 
554 static int mmhub_v1_0_set_powergating_state(void *handle,
555 					enum amd_powergating_state state)
556 {
557 	return 0;
558 }
559 
560 const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
561 	.name = "mmhub_v1_0",
562 	.early_init = mmhub_v1_0_early_init,
563 	.late_init = mmhub_v1_0_late_init,
564 	.sw_init = mmhub_v1_0_sw_init,
565 	.sw_fini = mmhub_v1_0_sw_fini,
566 	.hw_init = mmhub_v1_0_hw_init,
567 	.hw_fini = mmhub_v1_0_hw_fini,
568 	.suspend = mmhub_v1_0_suspend,
569 	.resume = mmhub_v1_0_resume,
570 	.is_idle = mmhub_v1_0_is_idle,
571 	.wait_for_idle = mmhub_v1_0_wait_for_idle,
572 	.soft_reset = mmhub_v1_0_soft_reset,
573 	.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
574 	.set_powergating_state = mmhub_v1_0_set_powergating_state,
575 	.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
576 };
577 
const struct amdgpu_ip_block_version mmhub_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MMHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &mmhub_v1_0_ip_funcs,
};