/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

/* Ecc related register addresses, (BASE + reg offset) */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)

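/*
 * The sixteen addresses in each group above appear to follow a simple
 * pattern: four UMC instances 0x40000 apart, each with four channels
 * 0x800 apart.  The tables below just enumerate them for iteration.
 */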
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};

static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};

static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};

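/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: the interrupt source this change applies to
 * @type: interrupt type (unused here)
 * @state: interrupt state to program
 *
 * Sets or clears the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of all sixteen contexts on every VM hub.
 */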
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

/**
 * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 * @entry: the interrupt vector entry to prescreen
 * @addr: the faulting address decoded from @entry
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
				  struct amdgpu_iv_entry *entry,
				  uint64_t addr)
{
	struct amdgpu_vm *vm;
	u64 key;
	int r;

	/* No PASID, can't identify faulting process */
	if (!entry->pasid)
		return true;

	/* Not a retry fault */
	if (!(entry->src_data[1] & 0x80))
		return true;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
	if (!vm) {
		/* VM not found, process it normally */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	key = AMDGPU_VM_FAULT(entry->pasid, addr);
	r = amdgpu_vm_add_fault(vm->fault_hash, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0) {
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}
	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		amdgpu_vm_clear_fault(vm->fault_hash, key);
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	spin_unlock(&adev->vm_manager.pasid_lock);
	/* It's the first fault for this address, process it normally */
	return true;
}

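/**
 * gmc_v9_0_process_interrupt - process a VM page fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: the interrupt source this entry belongs to
 * @entry: the interrupt vector entry to process
 *
 * Reassembles the faulting address (the page address is split between
 * src_data[0] and the low nibble of src_data[1]), prescreens retry
 * faults, and logs the fault with task information unless it was
 * filtered out.
 */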
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

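/*
 * Build the VM_INVALIDATE_ENG*_REQ value for flushing @vmid: invalidate
 * the L1 and all L2 PTE/PDE levels, without clearing the protection
 * fault status.
 */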
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with a certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

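/*
 * Ring variant of gmc_v9_0_flush_gpu_tlb: emits the page directory base
 * update and the invalidate request/ack wait as ring commands instead
 * of issuing direct register writes.
 */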
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

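/*
 * Write @pasid into the IH block's per-VMID lookup table (the gfxhub
 * and mmhub each have their own LUT), presumably so interrupt entries
 * generated for this VMID report the right PASID.
 */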
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/**
 * gmc_v9_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
						uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

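/*
 * Adjust a PDE for the hardware: relocates VRAM addresses into the MC
 * address space and, when translate_further is enabled, sets the block
 * fragment size on PDB1 entries and the translate-further bit on PDB0
 * entries.
 */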
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

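/*
 * Walk the UMC capability, config and ECC control registers of all
 * sixteen channels and count every channel where ECC is fused off,
 * DRAM is not ready, or read/write ECC is disabled (the "lost sheep").
 * Returns 1 only when every channel has ECC fully enabled, 0 otherwise.
 */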
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first
	 * 8M, and causes VM_FAULTs once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		return true;
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

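/*
 * Assign every ring its own VM invalidation engine, taken from a
 * per-hub bitmap of free engines; fails with -EINVAL when a hub has
 * no engine left.
 */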
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports VRAM size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

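/*
 * Estimate how much VRAM the pre-OS console (VGA emulation or the
 * active viewport programmed by the VBIOS) is still scanning out of,
 * so that this memory can stay reserved until the driver takes over
 * the display.
 */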
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};