1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/pci.h>
25 
26 #include <drm/drm_cache.h>
27 
28 #include "amdgpu.h"
29 #include "amdgpu_atomfirmware.h"
30 #include "gmc_v11_0.h"
31 #include "umc_v8_10.h"
32 #include "athub/athub_3_0_0_sh_mask.h"
33 #include "athub/athub_3_0_0_offset.h"
34 #include "oss/osssys_6_0_0_offset.h"
35 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
36 #include "navi10_enum.h"
37 #include "soc15.h"
38 #include "soc15d.h"
39 #include "soc15_common.h"
40 #include "nbio_v4_3.h"
41 #include "gfxhub_v3_0.h"
42 #include "gfxhub_v3_0_3.h"
43 #include "mmhub_v3_0.h"
44 #include "mmhub_v3_0_1.h"
45 #include "mmhub_v3_0_2.h"
46 #include "athub_v3_0.h"
47 
48 
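/*
 * No ECC interrupt state needs to be programmed on GMC v11; this stub
 * only satisfies the amdgpu_irq_src .set interface.
 */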
49 static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
50 					 struct amdgpu_irq_src *src,
51 					 unsigned type,
52 					 enum amdgpu_interrupt_state state)
53 {
54 	return 0;
55 }
56 
57 static int
58 gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
59 				   struct amdgpu_irq_src *src, unsigned type,
60 				   enum amdgpu_interrupt_state state)
61 {
62 	switch (state) {
63 	case AMDGPU_IRQ_STATE_DISABLE:
64 		/* MM HUB */
65 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
66 		/* GFX HUB */
67 		/* This works because this interrupt is only
68 		 * enabled at init/resume and disabled in
69 		 * fini/suspend, so the overall state doesn't
70 		 * change over the course of suspend/resume.
71 		 */
72 		if (!adev->in_s0ix)
73 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
74 		break;
75 	case AMDGPU_IRQ_STATE_ENABLE:
76 		/* MM HUB */
77 		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
78 		/* GFX HUB */
79 		/* This works because this interrupt is only
80 		 * enabled at init/resume and disabled in
81 		 * fini/suspend, so the overall state doesn't
82 		 * change over the course of suspend/resume.
83 		 */
84 		if (!adev->in_s0ix)
85 			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
86 		break;
87 	default:
88 		break;
89 	}
90 
91 	return 0;
92 }
93 
94 static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
95 				       struct amdgpu_irq_src *source,
96 				       struct amdgpu_iv_entry *entry)
97 {
98 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
99 	uint32_t status = 0;
100 	u64 addr;
101 
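	/*
	 * The IV entry packs a 48-bit fault address: src_data[0] carries
	 * bits 43:12 (the page frame) and the low nibble of src_data[1]
	 * carries bits 47:44.
	 */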
102 	addr = (u64)entry->src_data[0] << 12;
103 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
104 
105 	if (!amdgpu_sriov_vf(adev)) {
106 		/*
107 		 * Issue a dummy read to wait for the status register to
108 		 * be updated to avoid reading an incorrect value due to
109 		 * the new fast GRBM interface.
110 		 */
111 		if (entry->vmid_src == AMDGPU_GFXHUB_0)
112 			RREG32(hub->vm_l2_pro_fault_status);
113 
114 		status = RREG32(hub->vm_l2_pro_fault_status);
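		/* Writing bit 0 clears the latched fault status. */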
115 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
116 	}
117 
118 	if (printk_ratelimit()) {
119 		struct amdgpu_task_info task_info;
120 
121 		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
122 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
123 
124 		dev_err(adev->dev,
125 			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
126 			"for process %s pid %d thread %s pid %d)\n",
127 			entry->vmid_src ? "mmhub" : "gfxhub",
128 			entry->src_id, entry->ring_id, entry->vmid,
129 			entry->pasid, task_info.process_name, task_info.tgid,
130 			task_info.task_name, task_info.pid);
131 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
132 			addr, entry->client_id);
133 		if (!amdgpu_sriov_vf(adev))
134 			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
135 	}
136 
137 	return 0;
138 }
139 
140 static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
141 	.set = gmc_v11_0_vm_fault_interrupt_state,
142 	.process = gmc_v11_0_process_interrupt,
143 };
144 
145 static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
146 	.set = gmc_v11_0_ecc_interrupt_state,
147 	.process = amdgpu_umc_process_ecc_irq,
148 };
149 
150 static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
151 {
152 	adev->gmc.vm_fault.num_types = 1;
153 	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;
154 
155 	if (!amdgpu_sriov_vf(adev)) {
156 		adev->gmc.ecc_irq.num_types = 1;
157 		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
158 	}
159 }
160 
161 /**
162  * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
163  *
164  * @adev: amdgpu_device pointer
165  * @vmhub: vmhub type
166  *
167  */
168 static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
169 				       uint32_t vmhub)
170 {
171 	return ((vmhub == AMDGPU_MMHUB_0) &&
172 		(!amdgpu_sriov_vf(adev)));
173 }
174 
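/* Look up the pasid mapped to @vmid in the IH LUT; returns true if one exists. */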
175 static bool gmc_v11_0_get_vmid_pasid_mapping_info(
176 					struct amdgpu_device *adev,
177 					uint8_t vmid, uint16_t *p_pasid)
178 {
179 	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
180 
181 	return !!(*p_pasid);
182 }
183 
184 /*
185  * GART
186  * VMID 0 is the physical GPU addresses as used by the kernel.
187  * VMIDs 1-15 are used for userspace clients and are handled
188  * by the amdgpu vm/hsa code.
189  */
190 
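/**
 * gmc_v11_0_flush_vm_hub - flush the TLB on the given hub via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested vmid using the hub's invalidation engine.
 */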
191 static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
192 				   unsigned int vmhub, uint32_t flush_type)
193 {
194 	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
195 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
196 	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
197 	u32 tmp;
198 	/* Use register 17 for GART */
199 	const unsigned eng = 17;
200 	unsigned int i;
201 	unsigned char hub_ip = 0;
202 
203 	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
204 		   GC_HWIP : MMHUB_HWIP;
205 
206 	spin_lock(&adev->gmc.invalidate_lock);
207 	/*
208 	 * It may lose gpuvm invalidate acknowldege state across power-gating
209 	 * off cycle, add semaphore acquire before invalidation and semaphore
210 	 * release after invalidation to avoid entering power gated state
211 	 * to WA the Issue
212 	 */
213 
	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
215 	if (use_semaphore) {
216 		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
218 			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
219 					    hub->eng_distance * eng, hub_ip);
220 			if (tmp & 0x1)
221 				break;
222 			udelay(1);
223 		}
224 
225 		if (i >= adev->usec_timeout)
226 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
227 	}
228 
229 	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
230 
	/* Wait for the invalidation ACK, polling with a delay. */
232 	for (i = 0; i < adev->usec_timeout; i++) {
233 		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
234 				    hub->eng_distance * eng, hub_ip);
235 		tmp &= 1 << vmid;
236 		if (tmp)
237 			break;
238 
239 		udelay(1);
240 	}
241 
	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
243 	if (use_semaphore)
244 		/*
245 		 * add semaphore release after invalidation,
246 		 * write with 0 means semaphore release
247 		 */
248 		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
249 			      hub->eng_distance * eng, 0, hub_ip);
250 
251 	/* Issue additional private vm invalidation to MMHUB */
	if ((vmhub != AMDGPU_GFXHUB_0) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
255 		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
257 		inv_req |= (1 << 25);
258 		/* Issue private invalidation */
259 		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation has completed. */
261 		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
262 	}
263 
264 	spin_unlock(&adev->gmc.invalidate_lock);
265 
266 	if (i < adev->usec_timeout)
267 		return;
268 
269 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
270 }
271 
272 /**
273  * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
274  *
275  * @adev: amdgpu_device pointer
276  * @vmid: vm instance to flush
277  *
278  * Flush the TLB for the requested page table.
279  */
280 static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
281 					uint32_t vmhub, uint32_t flush_type)
282 {
283 	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
284 		return;
285 
286 	/* flush hdp cache */
287 	adev->hdp.funcs->flush_hdp(adev, NULL);
288 
	/* At SRIOV run time, the driver must not access registers through
	 * MMIO; use the KIQ to perform the VM invalidation instead.
	 */
292 	if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
293 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
294 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
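		/* GART invalidation uses engine 17, as in gmc_v11_0_flush_vm_hub() */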
295 		const unsigned eng = 17;
296 		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
297 		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
298 		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
299 
300 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
301 				1 << vmid);
302 		return;
303 	}
304 
305 	mutex_lock(&adev->mman.gtt_window_lock);
306 	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
307 	mutex_unlock(&adev->mman.gtt_window_lock);
309 }
310 
311 /**
312  * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
313  *
314  * @adev: amdgpu_device pointer
315  * @pasid: pasid to be flush
316  *
317  * Flush the TLB for the requested pasid.
318  */
319 static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
320 					uint16_t pasid, uint32_t flush_type,
321 					bool all_hub)
322 {
323 	int vmid, i;
324 	signed long r;
325 	uint32_t seq;
326 	uint16_t queried_pasid;
327 	bool ret;
328 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
329 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
330 
331 	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
332 		spin_lock(&adev->gfx.kiq.ring_lock);
333 		/* 2 dwords flush + 8 dwords fence */
334 		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
335 		kiq->pmf->kiq_invalidate_tlbs(ring,
336 					pasid, flush_type, all_hub);
337 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
338 		if (r) {
339 			amdgpu_ring_undo(ring);
340 			spin_unlock(&adev->gfx.kiq.ring_lock);
341 			return -ETIME;
342 		}
343 
344 		amdgpu_ring_commit(ring);
345 		spin_unlock(&adev->gfx.kiq.ring_lock);
346 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
347 		if (r < 1) {
348 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
349 			return -ETIME;
350 		}
351 
352 		return 0;
353 	}
354 
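	/*
	 * Fallback path without the KIQ: scan the VMID-pasid mappings and
	 * directly flush every VMID that carries the requested pasid.
	 */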
355 	for (vmid = 1; vmid < 16; vmid++) {
357 		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
358 				&queried_pasid);
		if (ret && queried_pasid == pasid) {
360 			if (all_hub) {
361 				for (i = 0; i < adev->num_vmhubs; i++)
362 					gmc_v11_0_flush_gpu_tlb(adev, vmid,
363 							i, flush_type);
364 			} else {
365 				gmc_v11_0_flush_gpu_tlb(adev, vmid,
366 						AMDGPU_GFXHUB_0, flush_type);
367 			}
368 		}
369 	}
370 
371 	return 0;
372 }
373 
374 static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
375 					     unsigned vmid, uint64_t pd_addr)
376 {
377 	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
378 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
379 	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
380 	unsigned eng = ring->vm_inv_eng;
381 
382 	/*
383 	 * It may lose gpuvm invalidate acknowldege state across power-gating
384 	 * off cycle, add semaphore acquire before invalidation and semaphore
385 	 * release after invalidation to avoid entering power gated state
386 	 * to WA the Issue
387 	 */
388 
	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
390 	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
392 		amdgpu_ring_emit_reg_wait(ring,
393 					  hub->vm_inv_eng0_sem +
394 					  hub->eng_distance * eng, 0x1, 0x1);
395 
396 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
397 			      (hub->ctx_addr_distance * vmid),
398 			      lower_32_bits(pd_addr));
399 
400 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
401 			      (hub->ctx_addr_distance * vmid),
402 			      upper_32_bits(pd_addr));
403 
404 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
405 					    hub->eng_distance * eng,
406 					    hub->vm_inv_eng0_ack +
407 					    hub->eng_distance * eng,
408 					    req, 1 << vmid);
409 
	/* TODO: the semaphore path still needs debugging for GFXHUB as well. */
411 	if (use_semaphore)
412 		/*
413 		 * add semaphore release after invalidation,
414 		 * write with 0 means semaphore release
415 		 */
416 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
417 				      hub->eng_distance * eng, 0);
418 
419 	return pd_addr;
420 }
421 
422 static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
423 					 unsigned pasid)
424 {
425 	struct amdgpu_device *adev = ring->adev;
426 	uint32_t reg;
427 
428 	/* MES fw manages IH_VMID_x_LUT updating */
429 	if (ring->is_mes_queue)
430 		return;
431 
432 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
433 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
434 	else
435 		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
436 
437 	amdgpu_ring_emit_wreg(ring, reg, pasid);
438 }
439 
440 /*
441  * PTE format:
442  * 63:59 reserved
443  * 58:57 reserved
444  * 56 F
445  * 55 L
446  * 54 reserved
447  * 53:52 SW
448  * 51 T
449  * 50:48 mtype
450  * 47:12 4k physical page base address
451  * 11:7 fragment
452  * 6 write
453  * 5 read
454  * 4 exe
455  * 3 Z
456  * 2 snooped
457  * 1 system
458  * 0 valid
459  *
460  * PDE format:
461  * 63:59 block fragment size
462  * 58:55 reserved
463  * 54 P
464  * 53:48 reserved
465  * 47:6 physical base address of PD or PTE
466  * 5:3 reserved
467  * 2 C
468  * 1 system
469  * 0 valid
470  */
471 
472 static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
473 {
474 	switch (flags) {
475 	case AMDGPU_VM_MTYPE_DEFAULT:
476 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
477 	case AMDGPU_VM_MTYPE_NC:
478 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
479 	case AMDGPU_VM_MTYPE_WC:
480 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
481 	case AMDGPU_VM_MTYPE_CC:
482 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
483 	case AMDGPU_VM_MTYPE_UC:
484 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
485 	default:
486 		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
487 	}
488 }
489 
490 static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
491 				 uint64_t *addr, uint64_t *flags)
492 {
493 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
494 		*addr = adev->vm_manager.vram_base_offset + *addr -
495 			adev->gmc.vram_start;
496 	BUG_ON(*addr & 0xFFFF00000000003FULL);
497 
498 	if (!adev->gmc.translate_further)
499 		return;
500 
501 	if (level == AMDGPU_VM_PDB1) {
502 		/* Set the block fragment size */
503 		if (!(*flags & AMDGPU_PDE_PTE))
504 			*flags |= AMDGPU_PDE_BFS(0x9);
505 
506 	} else if (level == AMDGPU_VM_PDB0) {
507 		if (*flags & AMDGPU_PDE_PTE)
508 			*flags &= ~AMDGPU_PDE_PTE;
509 		else
510 			*flags |= AMDGPU_PTE_TF;
511 	}
512 }
513 
514 static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
515 				 struct amdgpu_bo_va_mapping *mapping,
516 				 uint64_t *flags)
517 {
518 	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
519 
520 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
521 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
522 
523 	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
524 	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
525 
526 	*flags &= ~AMDGPU_PTE_NOALLOC;
527 	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
528 
529 	if (mapping->flags & AMDGPU_PTE_PRT) {
530 		*flags |= AMDGPU_PTE_PRT;
531 		*flags |= AMDGPU_PTE_SNOOPED;
532 		*flags |= AMDGPU_PTE_LOG;
533 		*flags |= AMDGPU_PTE_SYSTEM;
534 		*flags &= ~AMDGPU_PTE_VALID;
535 	}
536 
537 	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
538 			       AMDGPU_GEM_CREATE_UNCACHED))
539 		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
540 			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
541 }
542 
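/* Size of the VBIOS reserved framebuffer region; none is needed on GMC v11. */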
543 static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
544 {
545 	return 0;
546 }
547 
548 static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
549 	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
550 	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
551 	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
552 	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
553 	.map_mtype = gmc_v11_0_map_mtype,
554 	.get_vm_pde = gmc_v11_0_get_vm_pde,
555 	.get_vm_pte = gmc_v11_0_get_vm_pte,
556 	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
557 };
558 
559 static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
560 {
561 	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
562 }
563 
564 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
565 {
566 	switch (adev->ip_versions[UMC_HWIP][0]) {
567 	case IP_VERSION(8, 10, 0):
568 		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
569 		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
570 		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
571 		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
572 		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
573 		if (adev->umc.node_inst_num == 4)
574 			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
575 		else
576 			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
577 		adev->umc.ras = &umc_v8_10_ras;
578 		break;
579 	case IP_VERSION(8, 11, 0):
580 		break;
581 	default:
582 		break;
583 	}
584 
585 	if (adev->umc.ras) {
586 		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
587 
588 		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
589 		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
590 		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
591 		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
592 
		/* If no dedicated ras_late_init function is defined, use the default. */
594 		if (!adev->umc.ras->ras_block.ras_late_init)
595 			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
596 
		/* If no dedicated ras_cb function is defined, use the default. */
598 		if (!adev->umc.ras->ras_block.ras_cb)
599 			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
600 	}
601 }
602 
604 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
605 {
606 	switch (adev->ip_versions[MMHUB_HWIP][0]) {
607 	case IP_VERSION(3, 0, 1):
608 		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
609 		break;
610 	case IP_VERSION(3, 0, 2):
611 		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
612 		break;
613 	default:
614 		adev->mmhub.funcs = &mmhub_v3_0_funcs;
615 		break;
616 	}
617 }
618 
619 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
620 {
621 	switch (adev->ip_versions[GC_HWIP][0]) {
622 	case IP_VERSION(11, 0, 3):
623 		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
624 		break;
625 	default:
626 		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
627 		break;
628 	}
629 }
630 
631 static int gmc_v11_0_early_init(void *handle)
632 {
633 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
634 
635 	gmc_v11_0_set_gfxhub_funcs(adev);
636 	gmc_v11_0_set_mmhub_funcs(adev);
637 	gmc_v11_0_set_gmc_funcs(adev);
638 	gmc_v11_0_set_irq_funcs(adev);
639 	gmc_v11_0_set_umc_funcs(adev);
640 
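	/* Fixed 4GB shared and 4GB private apertures. */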
641 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
642 	adev->gmc.shared_aperture_end =
643 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
644 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
645 	adev->gmc.private_aperture_end =
646 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
647 
648 	return 0;
649 }
650 
651 static int gmc_v11_0_late_init(void *handle)
652 {
653 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
654 	int r;
655 
656 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
657 	if (r)
658 		return r;
659 
660 	r = amdgpu_gmc_ras_late_init(adev);
661 	if (r)
662 		return r;
663 
664 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
665 }
666 
667 static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
668 					struct amdgpu_gmc *mc)
669 {
670 	u64 base = 0;
671 
672 	base = adev->mmhub.funcs->get_fb_location(adev);
673 
674 	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
675 	amdgpu_gmc_gart_location(adev, mc);
676 	amdgpu_gmc_agp_location(adev, mc);
677 
678 	/* base offset of vram pages */
679 	if (amdgpu_sriov_vf(adev))
680 		adev->vm_manager.vram_base_offset = 0;
681 	else
682 		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
683 }
684 
685 /**
686  * gmc_v11_0_mc_init - initialize the memory controller driver params
687  *
688  * @adev: amdgpu_device pointer
689  *
690  * Look up the amount of vram, vram width, and decide how to place
691  * vram and gart within the GPU's physical address space.
692  * Returns 0 for success.
693  */
694 static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
695 {
696 	int r;
697 
	/* get_memsize() returns the VRAM size in MB */
699 	adev->gmc.mc_vram_size =
700 		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
701 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
702 
703 	if (!(adev->flags & AMD_IS_APU)) {
704 		r = amdgpu_device_resize_fb_bar(adev);
705 		if (r)
706 			return r;
707 	}
708 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
709 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
710 
711 #ifdef CONFIG_X86_64
712 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
713 		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
714 		adev->gmc.aper_size = adev->gmc.real_vram_size;
715 	}
716 #endif
717 	/* In case the PCI BAR is larger than the actual amount of vram */
718 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
719 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
720 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
721 
	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
727 
728 	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);
729 
730 	return 0;
731 }
732 
733 static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
734 {
735 	int r;
736 
737 	if (adev->gart.bo) {
738 		WARN(1, "PCIE GART already initialized\n");
739 		return 0;
740 	}
741 
742 	/* Initialize common gart structure */
743 	r = amdgpu_gart_init(adev);
744 	if (r)
745 		return r;
746 
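	/* Each GART page table entry is 8 bytes. */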
747 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
748 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
749 				 AMDGPU_PTE_EXECUTABLE;
750 
751 	return amdgpu_gart_table_vram_alloc(adev);
752 }
753 
754 static int gmc_v11_0_sw_init(void *handle)
755 {
756 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
757 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
758 
759 	adev->mmhub.funcs->init(adev);
760 
761 	spin_lock_init(&adev->gmc.invalidate_lock);
762 
763 	r = amdgpu_atomfirmware_get_vram_info(adev,
764 					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
769 
770 	switch (adev->ip_versions[GC_HWIP][0]) {
771 	case IP_VERSION(11, 0, 0):
772 	case IP_VERSION(11, 0, 1):
773 	case IP_VERSION(11, 0, 2):
774 	case IP_VERSION(11, 0, 3):
775 	case IP_VERSION(11, 0, 4):
776 		adev->num_vmhubs = 2;
777 		/*
778 		 * To fulfill 4-level page support,
779 		 * vm size is 256TB (48bit), maximum size,
780 		 * block size 512 (9bit)
781 		 */
782 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
783 		break;
784 	default:
785 		break;
786 	}
787 
	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;
795 
796 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
797 			      UTCL2_1_0__SRCID__FAULT,
798 			      &adev->gmc.vm_fault);
799 	if (r)
800 		return r;
801 
802 	if (!amdgpu_sriov_vf(adev)) {
803 		/* interrupt sent to DF. */
804 		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
805 				      &adev->gmc.ecc_irq);
806 		if (r)
807 			return r;
808 	}
809 
810 	/*
811 	 * Set the internal MC address mask This is the max address of the GPU's
812 	 * internal address space.
813 	 */
814 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
815 
816 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
817 	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
819 		return r;
820 	}
821 
822 	adev->need_swiotlb = drm_need_swiotlb(44);
823 
824 	r = gmc_v11_0_mc_init(adev);
825 	if (r)
826 		return r;
827 
828 	amdgpu_gmc_get_vbios_allocations(adev);
829 
830 	/* Memory manager */
831 	r = amdgpu_bo_init(adev);
832 	if (r)
833 		return r;
834 
835 	r = gmc_v11_0_gart_init(adev);
836 	if (r)
837 		return r;
838 
839 	/*
840 	 * number of VMs
841 	 * VMID 0 is reserved for System
842 	 * amdgpu graphics/compute will use VMIDs 1-7
843 	 * amdkfd will use VMIDs 8-15
844 	 */
845 	adev->vm_manager.first_kfd_vmid = 8;
846 
847 	amdgpu_vm_manager_init(adev);
848 
849 	return 0;
850 }
851 
852 /**
853  * gmc_v11_0_gart_fini - vm fini callback
854  *
855  * @adev: amdgpu_device pointer
856  *
857  * Tears down the driver GART/VM setup (CIK).
858  */
859 static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
860 {
861 	amdgpu_gart_table_vram_free(adev);
862 }
863 
864 static int gmc_v11_0_sw_fini(void *handle)
865 {
866 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
867 
868 	amdgpu_vm_manager_fini(adev);
869 	gmc_v11_0_gart_fini(adev);
870 	amdgpu_gem_force_release(adev);
871 	amdgpu_bo_fini(adev);
872 
873 	return 0;
874 }
875 
876 static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
877 {
878 }
879 
880 /**
881  * gmc_v11_0_gart_enable - gart enable
882  *
883  * @adev: amdgpu_device pointer
884  */
885 static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
886 {
887 	int r;
888 	bool value;
889 
	if (!adev->gart.bo) {
891 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
892 		return -EINVAL;
893 	}
894 
895 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
896 
897 	r = adev->mmhub.funcs->gart_enable(adev);
898 	if (r)
899 		return r;
900 
901 	/* Flush HDP after it is initialized */
902 	adev->hdp.funcs->flush_hdp(adev, NULL);
903 
	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);
906 
907 	adev->mmhub.funcs->set_fault_enable_default(adev, value);
908 	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
909 
910 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
911 		 (unsigned)(adev->gmc.gart_size >> 20),
912 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
913 
914 	return 0;
915 }
916 
917 static int gmc_v11_0_hw_init(void *handle)
918 {
919 	int r;
920 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
921 
	/* The sequence of these two function calls matters. */
923 	gmc_v11_0_init_golden_registers(adev);
924 
925 	r = gmc_v11_0_gart_enable(adev);
926 	if (r)
927 		return r;
928 
929 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
930 		adev->umc.funcs->init_registers(adev);
931 
932 	return 0;
933 }
934 
935 /**
936  * gmc_v11_0_gart_disable - gart disable
937  *
938  * @adev: amdgpu_device pointer
939  *
 * This disables all VM page tables.
941  */
942 static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
943 {
944 	adev->mmhub.funcs->gart_disable(adev);
945 }
946 
947 static int gmc_v11_0_hw_fini(void *handle)
948 {
949 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
950 
951 	if (amdgpu_sriov_vf(adev)) {
952 		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV, GMC registers are not touched.\n");
954 		return 0;
955 	}
956 
957 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
958 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
959 	gmc_v11_0_gart_disable(adev);
960 
961 	return 0;
962 }
963 
964 static int gmc_v11_0_suspend(void *handle)
965 {
966 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
967 
968 	gmc_v11_0_hw_fini(adev);
969 
970 	return 0;
971 }
972 
973 static int gmc_v11_0_resume(void *handle)
974 {
975 	int r;
976 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
977 
978 	r = gmc_v11_0_hw_init(adev);
979 	if (r)
980 		return r;
981 
982 	amdgpu_vmid_reset_all(adev);
983 
984 	return 0;
985 }
986 
987 static bool gmc_v11_0_is_idle(void *handle)
988 {
	/* MC is always ready in GMC v11. */
990 	return true;
991 }
992 
993 static int gmc_v11_0_wait_for_idle(void *handle)
994 {
	/* There is no need to wait for MC idle in GMC v11. */
996 	return 0;
997 }
998 
999 static int gmc_v11_0_soft_reset(void *handle)
1000 {
1001 	return 0;
1002 }
1003 
1004 static int gmc_v11_0_set_clockgating_state(void *handle,
1005 					   enum amd_clockgating_state state)
1006 {
1007 	int r;
1008 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1009 
1010 	r = adev->mmhub.funcs->set_clockgating(adev, state);
1011 	if (r)
1012 		return r;
1013 
1014 	return athub_v3_0_set_clockgating(adev, state);
1015 }
1016 
1017 static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
1018 {
1019 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1020 
1021 	adev->mmhub.funcs->get_clockgating(adev, flags);
1022 
1023 	athub_v3_0_get_clockgating(adev, flags);
1024 }
1025 
1026 static int gmc_v11_0_set_powergating_state(void *handle,
1027 					   enum amd_powergating_state state)
1028 {
1029 	return 0;
1030 }
1031 
1032 const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
1033 	.name = "gmc_v11_0",
1034 	.early_init = gmc_v11_0_early_init,
1035 	.sw_init = gmc_v11_0_sw_init,
1036 	.hw_init = gmc_v11_0_hw_init,
1037 	.late_init = gmc_v11_0_late_init,
1038 	.sw_fini = gmc_v11_0_sw_fini,
1039 	.hw_fini = gmc_v11_0_hw_fini,
1040 	.suspend = gmc_v11_0_suspend,
1041 	.resume = gmc_v11_0_resume,
1042 	.is_idle = gmc_v11_0_is_idle,
1043 	.wait_for_idle = gmc_v11_0_wait_for_idle,
1044 	.soft_reset = gmc_v11_0_soft_reset,
1045 	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
1046 	.set_powergating_state = gmc_v11_0_set_powergating_state,
1047 	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
1048 };
1049 
1050 const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
1051 	.type = AMD_IP_BLOCK_TYPE_GMC,
1052 	.major = 11,
1053 	.minor = 0,
1054 	.rev = 0,
1055 	.funcs = &gmc_v11_0_ip_funcs,
1056 };
1057