/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO: add golden settings for HDP */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

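	/*
	 * The faulting page address arrives split across the IV entry:
	 * src_data[0] carries bits 43:12 and the low nibble of src_data[1]
	 * carries bits 47:44, giving a 48-bit, 4K-aligned address.
	 */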
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - check whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

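/*
 * Look up the PASID currently mapped to the given VMID in the ATC.
 * Returns true and stores the PASID in *p_pasid when the mapping is valid.
 */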
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use invalidation engine 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		   GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU can lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. As a workaround, acquire a semaphore before
	 * the invalidation and release it afterwards to avoid entering a
	 * power-gated state during the invalidation.
	 */

	/* TODO: More debugging is needed before the semaphore can be used for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
					 hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for the ACK, polling with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
				  hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: More debugging is needed before the semaphore can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 releases it.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;
	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* During SRIOV runtime the driver shouldn't access registers through
	 * MMIO. Use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
				1 << vmid);

		up_read(&adev->reset_sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    !ring->sched.ready) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
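	/*
	 * The IB below is a single NOP: the actual invalidation is done by
	 * the VM flush which job->vm_needs_flush requests before this IB
	 * executes on the SDMA ring.
	 */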
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs (used with PACKET3_INVALIDATE_TLBS_ALL_HUB())
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

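	/*
	 * No usable KIQ ring: walk the ATC VMID<->PASID mappings and flush
	 * every VMID currently bound to the requested PASID via MMIO.
	 */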
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU can lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. As a workaround, acquire a semaphore before
	 * the invalidation and release it afterwards to avoid entering a
	 * power-gated state during the invalidation.
	 */

	/* TODO: More debugging is needed before the semaphore can be used for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: More debugging is needed before the semaphore can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 releases it.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved; used for MALL noalloc on sienna_cichlid
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
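
/*
 * Illustrative encoding derived from the PTE layout above (not a value
 * read from hardware): a valid, readable, writable, executable system
 * page at physical address 0x1000 with MTYPE_NC (0) would encode as
 *   0x1000 | valid (bit 0) | system (bit 1) | exe (bit 4) |
 *   read (bit 5) | write (bit 6) = 0x0000000000001073
 */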

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

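		/*
		 * The pre-OS framebuffer reservation assumes a 32bpp scanout:
		 * viewport height * surface pitch * 4 bytes per pixel.
		 */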
		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
					HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
				4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.name, "umc");
		adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;

		/* If no special ras_late_init function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_fini function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_fini)
			adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 3):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	r = amdgpu_gmc_ras_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB; convert it to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
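	/*
	 * On APUs, VRAM is a carve-out of system memory reached through the
	 * MC FB offset rather than through the PCI BAR.
	 */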
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;
		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 3):
		adev->num_vmhubs = 2;
		/*
		 * To support 4-level paging, use a VM size of 256 TB (48 bit),
		 * the maximum for Navi10/Navi14/Navi12, with a block size of
		 * 512 entries (9 bit).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX
	 * block register setup within GMC, otherwise the system hangs when
	 * harvesting SAs.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};