xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c (revision a6ca5ac746d104019e76c29e69c2a1fc6dd2b29f)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "amdgpu.h"
24 #include "mmhub_v1_0.h"
25 
26 #include "vega10/soc15ip.h"
27 #include "vega10/MMHUB/mmhub_1_0_offset.h"
28 #include "vega10/MMHUB/mmhub_1_0_sh_mask.h"
29 #include "vega10/MMHUB/mmhub_1_0_default.h"
30 #include "vega10/ATHUB/athub_1_0_offset.h"
31 #include "vega10/ATHUB/athub_1_0_sh_mask.h"
32 #include "vega10/ATHUB/athub_1_0_default.h"
33 #include "vega10/vega10_enum.h"
34 
35 #include "soc15_common.h"
36 
/* On Raven, DAGB0_CNTL_MISC2 sits at a different offset than the one in the
 * vega10 headers included above, so provide the Raven location locally.
 * Used by the CHIP_RAVEN paths in the clock-gating code below. */
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
39 
40 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
41 {
42 	u64 base = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE));
43 
44 	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
45 	base <<= 24;
46 
47 	return base;
48 }
49 
/* Program the MMHUB MC/VM registers and enable GART address translation:
 * system aperture, TLB, L2 cache, context0 (kernel GART) page table, the
 * protection-fault default page, and contexts 1..15 for per-process VMs.
 * Always returns 0. */
int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	uint64_t addr;
	u32 i;

	/* Program MC. */
	/* Update configuration */
	DRM_INFO("%s -- in\n", __func__);
	/* System aperture covers the VRAM range; registers take 256KB units
	 * (hence >> 18). */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);
	/* Default page = MC address of the VRAM scratch page, split across a
	 * 32-bit LSB (bits 43:12) / MSB (bits above 43) register pair. */
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
				(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
				(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so the vbios post doesn't program them;
		 * for SRIOV the driver needs to program them itself. */
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE),
			adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP),
			adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_TOP), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_AGP_BOT), 0x00FFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_ACCESS_MODE,
				3);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_APERTURE_UNMAPPED_ACCESS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ECO_BITS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				MTYPE,
				MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ATC_EN,
				1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				ENABLE_L2_FRAGMENT_PROCESSING,
				0);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				L2_PDE0_CACHE_TAG_GENERATION_MODE,
				0);/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				CONTEXT1_IDENTITY_ACCESS_MODE,
				1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				IDENTITY_MODE_FRAGMENT_SIZE,
				0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);

	/* Flush stale translations before enabling the new setup. */
	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL2), tmp);

	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL,
			    0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL4), tmp);

	/* setup context0: the GART range, in 4KB pages (>> 12), split
	 * into lo/hi register halves (>> 44 yields the bits above 43). */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	/* Page table must be 4KB aligned and within the 48-bit range. */
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start +
		adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /* valid bit */

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	/* Faulting accesses get redirected to the dummy page. */
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)((u64)adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
			    1);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	/* Enable context0 with a flat (depth 0) page table. */
	addr = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
	tmp = RREG32(addr);

	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Read-back; result is discarded. NOTE(review): presumably this
	 * posts/flushes the preceding write — confirm intent. */
	tmp = RREG32(addr);

	/* Disable identity aperture.*/
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

	/* Contexts 1..15: CNTL registers are one dword apart (+ i); the
	 * start/end address registers come in lo/hi pairs (+ i*2). */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL)
				+ i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		/* block_size - 9: the field encodes the block size relative
		 * to the 9-bit (512-entry/2MB) minimum — NOTE(review):
		 * convention lives in amdgpu_vm; confirm there. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				adev->vm_manager.block_size - 9);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
			lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2,
			upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	return 0;
}
260 
261 void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
262 {
263 	u32 tmp;
264 	u32 i;
265 
266 	/* Disable all tables */
267 	for (i = 0; i < 16; i++)
268 		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL) + i, 0);
269 
270 	/* Setup TLB control */
271 	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL));
272 	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
273 	tmp = REG_SET_FIELD(tmp,
274 				MC_VM_MX_L1_TLB_CNTL,
275 				ENABLE_ADVANCED_DRIVER_MODEL,
276 				0);
277 	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);
278 
279 	/* Setup L2 cache */
280 	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL));
281 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
282 	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL), tmp);
283 	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_CNTL3), 0);
284 }
285 
286 /**
287  * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
288  *
289  * @adev: amdgpu_device pointer
290  * @value: true redirects VM faults to the default page
291  */
292 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
293 {
294 	u32 tmp;
295 	tmp = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
296 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
297 			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
298 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
299 			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
300 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
301 			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
302 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
303 			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
304 	tmp = REG_SET_FIELD(tmp,
305 			VM_L2_PROTECTION_FAULT_CNTL,
306 			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
307 			value);
308 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
309 			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
310 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
311 			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
312 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
313 			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
314 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
315 			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
316 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
317 			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
318 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
319 			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
320 	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
321 }
322 
/* IP-block early_init hook: nothing to do for MMHUB 1.0. */
static int mmhub_v1_0_early_init(void *handle)
{
	return 0;
}
327 
/* IP-block late_init hook: nothing to do for MMHUB 1.0. */
static int mmhub_v1_0_late_init(void *handle)
{
	return 0;
}
332 
333 static int mmhub_v1_0_sw_init(void *handle)
334 {
335 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
336 	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
337 
338 	hub->ctx0_ptb_addr_lo32 =
339 		SOC15_REG_OFFSET(MMHUB, 0,
340 				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
341 	hub->ctx0_ptb_addr_hi32 =
342 		SOC15_REG_OFFSET(MMHUB, 0,
343 				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
344 	hub->vm_inv_eng0_req =
345 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
346 	hub->vm_inv_eng0_ack =
347 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ACK);
348 	hub->vm_context0_cntl =
349 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL);
350 	hub->vm_l2_pro_fault_status =
351 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
352 	hub->vm_l2_pro_fault_cntl =
353 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
354 
355 	return 0;
356 }
357 
/* IP-block sw_fini hook: nothing allocated in sw_init, nothing to free. */
static int mmhub_v1_0_sw_fini(void *handle)
{
	return 0;
}
362 
363 static int mmhub_v1_0_hw_init(void *handle)
364 {
365 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
366 	unsigned i;
367 
368 	for (i = 0; i < 18; ++i) {
369 		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
370 					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
371 		       2 * i, 0xffffffff);
372 		WREG32(SOC15_REG_OFFSET(MMHUB, 0,
373 					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
374 		       2 * i, 0x1f);
375 	}
376 
377 	return 0;
378 }
379 
/* IP-block hw_fini hook: no teardown needed here (GART disable is
 * driven separately via mmhub_v1_0_gart_disable). */
static int mmhub_v1_0_hw_fini(void *handle)
{
	return 0;
}
384 
/* IP-block suspend hook: no state to save for MMHUB 1.0. */
static int mmhub_v1_0_suspend(void *handle)
{
	return 0;
}
389 
/* IP-block resume hook: no state to restore for MMHUB 1.0. */
static int mmhub_v1_0_resume(void *handle)
{
	return 0;
}
394 
/* IP-block is_idle hook: no busy status is checked; always reports idle. */
static bool mmhub_v1_0_is_idle(void *handle)
{
	return true;
}
399 
/* IP-block wait_for_idle hook: nothing to wait on (is_idle is always true). */
static int mmhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}
404 
/* IP-block soft_reset hook: no soft-reset sequence implemented. */
static int mmhub_v1_0_soft_reset(void *handle)
{
	return 0;
}
409 
/* Enable/disable medium-grain clock gating (MGCG) in the MMHUB's ATC L2
 * and DAGB blocks.  Each def*/data* pair holds the original vs. the
 * to-be-written register value; a write is only issued when they differ. */
static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	/* def2/data2 stay 0 on Raven: only one DAGB instance is handled
	 * there, and the DAGB1 write at the end is skipped for Raven. */
	uint32_t def, data, def1, data1, def2 = 0, data2 = 0;

	def  = data  = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));

	/* Raven's DAGB0_CNTL_MISC2 lives at a different offset — see the
	 * mmDAGB0_CNTL_MISC2_RV define at the top of this file. */
	if (adev->asic_type != CHIP_RAVEN) {
		def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2));
		def2 = data2 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2));
	} else
		def1 = data1 = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		/* Gating on: set the ATC L2 CG enable and clear every
		 * per-client DISABLE_*_CG bit in the DAGB block(s). */
		data |= ATC_L2_MISC_CG__ENABLE_MASK;

		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
		           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			           DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	} else {
		/* Gating off: the inverse — clear the enable, set every
		 * DISABLE_*_CG bit. */
		data &= ~ATC_L2_MISC_CG__ENABLE_MASK;

		data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

		if (adev->asic_type != CHIP_RAVEN)
			data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
			          DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
	}

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);

	if (def1 != data1) {
		if (adev->asic_type != CHIP_RAVEN)
			WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2), data1);
		else
			WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV), data1);
	}

	if (adev->asic_type != CHIP_RAVEN && def2 != data2)
		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_CNTL_MISC2), data2);
}
472 
473 static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
474 						   bool enable)
475 {
476 	uint32_t def, data;
477 
478 	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
479 
480 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
481 		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
482 	else
483 		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
484 
485 	if (def != data)
486 		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
487 }
488 
489 static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
490 						       bool enable)
491 {
492 	uint32_t def, data;
493 
494 	def = data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
495 
496 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
497 		data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
498 	else
499 		data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
500 
501 	if (def != data)
502 		WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG), data);
503 }
504 
505 static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
506 						  bool enable)
507 {
508 	uint32_t def, data;
509 
510 	def = data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
511 
512 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
513 	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
514 		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
515 	else
516 		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
517 
518 	if(def != data)
519 		WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL), data);
520 }
521 
522 static int mmhub_v1_0_set_clockgating_state(void *handle,
523 					enum amd_clockgating_state state)
524 {
525 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
526 
527 	if (amdgpu_sriov_vf(adev))
528 		return 0;
529 
530 	switch (adev->asic_type) {
531 	case CHIP_VEGA10:
532 	case CHIP_RAVEN:
533 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
534 				state == AMD_CG_STATE_GATE ? true : false);
535 		athub_update_medium_grain_clock_gating(adev,
536 				state == AMD_CG_STATE_GATE ? true : false);
537 		mmhub_v1_0_update_medium_grain_light_sleep(adev,
538 				state == AMD_CG_STATE_GATE ? true : false);
539 		athub_update_medium_grain_light_sleep(adev,
540 				state == AMD_CG_STATE_GATE ? true : false);
541 		break;
542 	default:
543 		break;
544 	}
545 
546 	return 0;
547 }
548 
/* IP-block get_clockgating_state hook: report which MC gating features
 * are currently enabled by OR-ing them into @flags. */
static void mmhub_v1_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* NOTE(review): under SRIOV this only clears @flags and then still
	 * falls through to the register reads below — confirm whether an
	 * early return was intended here. */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATHUB_MISC_CNTL));
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	data = RREG32(SOC15_REG_OFFSET(MMHUB, 0, mmATC_L2_MISC_CG));
	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}
567 
/* IP-block set_powergating_state hook: no powergating handling is
 * implemented for MMHUB 1.0. */
static int mmhub_v1_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
573 
/* IP-block function table for MMHUB 1.0; most lifecycle stages are no-op
 * stubs (see above) — the real work happens in the gart_enable/disable
 * helpers called from the GMC code and in the clock-gating hooks. */
const struct amd_ip_funcs mmhub_v1_0_ip_funcs = {
	.name = "mmhub_v1_0",
	.early_init = mmhub_v1_0_early_init,
	.late_init = mmhub_v1_0_late_init,
	.sw_init = mmhub_v1_0_sw_init,
	.sw_fini = mmhub_v1_0_sw_fini,
	.hw_init = mmhub_v1_0_hw_init,
	.hw_fini = mmhub_v1_0_hw_fini,
	.suspend = mmhub_v1_0_suspend,
	.resume = mmhub_v1_0_resume,
	.is_idle = mmhub_v1_0_is_idle,
	.wait_for_idle = mmhub_v1_0_wait_for_idle,
	.soft_reset = mmhub_v1_0_soft_reset,
	.set_clockgating_state = mmhub_v1_0_set_clockgating_state,
	.set_powergating_state = mmhub_v1_0_set_powergating_state,
	.get_clockgating_state = mmhub_v1_0_get_clockgating_state,
};
591 
/* IP-block version descriptor registered with the amdgpu core. */
const struct amdgpu_ip_block_version mmhub_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_MMHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &mmhub_v1_0_ip_funcs,
};
600