xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 5a9b8e8a)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
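
/*
 * The *__SHIFT/*_MASK pairs above are used to decode individual fields
 * from the raw register value; for example, gmc_v9_0_mc_init() below
 * extracts the channel-interleave count with:
 *
 *	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
 *	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
 *	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
 */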

/* XXX Move this macro to a VEGA10 header file, the equivalent of vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
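/*
 * Each row above is an { offset, AND mask, OR value } triplet as consumed
 * by amdgpu_program_register_sequence() in gmc_v9_0_gart_enable(): the
 * masked bits are cleared and the OR value is written back, so each entry
 * here zeroes the low 28 bits of one HDP register.
 */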

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

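	/*
	 * Reassemble the 48-bit faulting page address from the IV ring
	 * entry: src_data[0] carries address bits 43:12 and the low
	 * nibble of src_data[1] carries bits 47:44.
	 */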
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

/*
 * GART
 * VMID 0 maps the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use invalidation engine 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = hub->get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

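		/*
		 * Poll for the ACK in two stages: spin briefly for the
		 * common case where the invalidation completes almost
		 * immediately, then fall back to a slower udelay() poll
		 * bounded by adev->usec_timeout.
		 */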
		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */
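
	/*
	 * Worked example (hypothetical values): mapping a 4K system page
	 * at physical address 0x123456000 read/write would produce
	 *
	 *	value = 0x123456000 | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_READABLE
	 *		| AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
	 *
	 * i.e. bits 47:12 hold the page base address and the low bits hold
	 * the access flags from the layout above.
	 */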

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

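/*
 * Translate an MC (GPU-internal) address into a CPU-usable physical
 * address by rebasing it from the start of VRAM in the GPU address
 * space onto vram_base_offset.
 */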
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}

static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
	.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
};

static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
{
	adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_mc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	switch (tmp) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 0;
		break;
	case 3:
		numchan = 4;
		break;
	case 4:
		numchan = 0;
		break;
	case 5:
		numchan = 8;
		break;
	case 6:
		numchan = 0;
		break;
	case 7:
		numchan = 16;
		break;
	case 8:
		numchan = 2;
		break;
	}
	adev->mc.vram_width = numchan * chansize;

	/* Could the aperture size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* nbio_v6_1_get_memsize() reports the VRAM size in MB */
	adev->mc.mc_vram_size =
		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* Unless the user has overridden it, set the GART size to
	 * 1024 MB or the VRAM size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
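	/* each GART page is described by a single 64-bit PTE */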
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/*
 * vm
 * VMID 0 maps the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vega10 specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15).
 * Returns 0 for success.
 */
static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 3;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	/* XXX This value is not zero for APUs. */
	adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v9_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup.
 */
static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
{
	return;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		/* XXX Don't know how to get the VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
	}

	/* These interrupts are for the VMC and UTCL2 page faults. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	/* Because of the four-level VMPTs, the VM size is at least 512GB.
	 * The maximum size is 256TB (48 bits).
	 */
	if (amdgpu_vm_size < 512) {
		DRM_WARN("VM size is at least 512GB!\n");
		amdgpu_vm_size = 512;
	}
5619ceaeeafSFelix Kuehling 	adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
562e60f8db5SAlex Xie 
563e60f8db5SAlex Xie 	/* Set the internal MC address mask
564e60f8db5SAlex Xie 	 * This is the max address of the GPU's
565e60f8db5SAlex Xie 	 * internal address space.
566e60f8db5SAlex Xie 	 */
567e60f8db5SAlex Xie 	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
568e60f8db5SAlex Xie 
569e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
570e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
571e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
572e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
573e60f8db5SAlex Xie 	 */
574e60f8db5SAlex Xie 	adev->need_dma32 = false;
575e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
576e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
577e60f8db5SAlex Xie 	if (r) {
578e60f8db5SAlex Xie 		adev->need_dma32 = true;
579e60f8db5SAlex Xie 		dma_bits = 32;
580e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
581e60f8db5SAlex Xie 	}
582e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
583e60f8db5SAlex Xie 	if (r) {
584e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
585e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
586e60f8db5SAlex Xie 	}
587e60f8db5SAlex Xie 
588e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
589e60f8db5SAlex Xie 	if (r)
590e60f8db5SAlex Xie 		return r;
591e60f8db5SAlex Xie 
592e60f8db5SAlex Xie 	/* Memory manager */
593e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
594e60f8db5SAlex Xie 	if (r)
595e60f8db5SAlex Xie 		return r;
596e60f8db5SAlex Xie 
597e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
598e60f8db5SAlex Xie 	if (r)
599e60f8db5SAlex Xie 		return r;
600e60f8db5SAlex Xie 
601e60f8db5SAlex Xie 	if (!adev->vm_manager.enabled) {
602e60f8db5SAlex Xie 		r = gmc_v9_0_vm_init(adev);
603e60f8db5SAlex Xie 		if (r) {
604e60f8db5SAlex Xie 			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
605e60f8db5SAlex Xie 			return r;
606e60f8db5SAlex Xie 		}
607e60f8db5SAlex Xie 		adev->vm_manager.enabled = true;
608e60f8db5SAlex Xie 	}
609e60f8db5SAlex Xie 	return r;
610e60f8db5SAlex Xie }
611e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_fini - gart fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
		golden_settings_vega10_hdp,
		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP. */
	nbio_v6_1_hdp_flush(adev);

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_hw_fini(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v9_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev,
				"vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* The MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};