/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#include "amdgpu_reset.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

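	/*
	 * The faulting page number is split across the IV entry: the low
	 * 32 bits are in src_data[0] and bits 47:44 are in the low nibble
	 * of src_data[1].  Shifting by 12 turns the 4 KiB page number into
	 * a byte address.
	 */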
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		   GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle.  As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
					 hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
				  hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* At SRIOV runtime the driver shouldn't access registers through
	 * MMIO directly.  Use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
				1 << vmid);

		up_read(&adev->reset_domain->sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

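	/*
	 * The IB itself only carries a single NOP; setting vm_needs_flush
	 * makes the scheduler emit the TLB invalidation as part of this
	 * job's VM flush on the SDMA ring.
	 */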
	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

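	/*
	 * Fallback path: without a usable KIQ (or in emulation), walk the
	 * ATC VMID->PASID mappings and directly flush every VMID that
	 * currently maps this PASID.
	 */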
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			if (!adev->enable_mes)
				break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle.  As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards to avoid
	 * entering a power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
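
/*
 * Illustrative example (not taken from a real trace): a snooped, executable,
 * read/write system-memory PTE for the 4 KiB page at 0x12345000 would be
 * 0x0000000012345077 -- page base in bits 47:12, mtype 0, fragment 0, and
 * write|read|exe|snooped|system|valid set in the low bits.
 */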

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

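	/*
	 * With translate-further enabled, PDB1 entries carry the block
	 * fragment size, while a PDB0 entry either maps a huge page
	 * directly (PDE-as-PTE) or sets the translate-further bit to
	 * point at one more level of page table.
	 */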
	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

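		/*
		 * Estimate the pre-OS framebuffer footprint from the active
		 * scanout: viewport height * surface pitch * 4 bytes per
		 * pixel (32bpp scanout assumed).
		 */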
		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
				4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no dedicated ras_late_init is defined, use the default one */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no dedicated ras_cb is defined, use the default one */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

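	/*
	 * The shared (SVM) and private apertures are two 4 GB windows high
	 * in the 64-bit GPU virtual address space.
	 */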
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	r = amdgpu_gmc_ras_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* The NBIO memsize register reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(10, 3, 1):   /* DCE SG support */
		case IP_VERSION(10, 3, 3):   /* DCE SG support */
		case IP_VERSION(10, 3, 6):   /* DCE SG support */
		case IP_VERSION(10, 3, 7):   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

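	/* Each GART PTE is 8 bytes; GART pages are mapped uncached and executable */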
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask.  This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

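	/*
	 * System memory is accessed through a 44-bit DMA mask here, which is
	 * narrower than the 48-bit internal MC address space above.
	 */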
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * The harvestable groups in gc_utcl2 need to be programmed before any
	 * GFX block register setup within GMC, or else the system hangs when
	 * harvesting SAs.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * The issue where the MMHUB can't disconnect from DF with MMHUB clock
	 * gating disabled is a new problem observed on DF 3.0.3; the same
	 * suspend sequence has not shown any issue on the DF 3.0.2 series
	 * platforms.
	 */
	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};