xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 03740baa)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

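/*
 * Enable or disable ECC error reporting by setting or clearing the low
 * seven bits of every UMC MCUMC CTRL and CTRL_MASK register listed above.
 */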
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

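/*
 * Toggle the protection fault interrupt enables in the CNTL registers of
 * VM contexts 0-15 on every VM hub.
 */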
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

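/*
 * Handle a VM protection fault: decode the faulting address from the IV
 * entry, let the retry-fault filter and the VM fault handler consume it
 * if they can, then dump the L2 protection fault status (rate limited).
 */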
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

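/*
 * Build the VM_INVALIDATE_ENG*_REQ value that flushes the L1/L2 TLB
 * entries of a single VMID with the given flush type.
 */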
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using a certain flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

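/*
 * Emit a TLB flush on the ring: write the page directory address for the
 * VMID, then write the invalidate request and wait for its ack.
 */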
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

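/* Translate AMDGPU_VM_MTYPE_* flags into VEGA10 PTE memory types. */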
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

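/*
 * Adjust a PDE for this ASIC: convert VRAM addresses to MC addresses and,
 * when 2+1 level translation is in use, set the block fragment size on
 * PDB1 entries and the translate-further bit on PDB0 entries.
 */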
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub_funcs = &mmhub_v1_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides the GART, which by default gets placed in the
	 * first 8M, and causes VM_FAULTS once the GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

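/*
 * Assign each ring a free VM invalidation engine on its VM hub, taken
 * from the per-hub bitmaps of available engines.
 */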
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

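/*
 * Late RAS init: register the UMC and MMHUB RAS handlers where the ASIC
 * provides them, then initialize XGMI RAS.
 */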
static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_ih_if umc_ih_info = {
		.cb = amdgpu_umc_process_ras_data_cb,
	};

	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
		r = adev->umc.funcs->ras_late_init(adev, &umc_ih_info);
		if (r)
			return r;
	}

	if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
		r = adev->mmhub_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return amdgpu_xgmi_ras_late_init(adev);
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

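/*
 * Place VRAM, GART and the AGP aperture in the GPU's physical address
 * space, starting from the FB location reported by the hub and offset by
 * this node's position in an XGMI hive.
 */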
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

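/*
 * Allocate the GART table in VRAM; each 8-byte PTE is marked uncached and
 * executable.
 */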
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

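/*
 * Compute how much VRAM the pre-OS framebuffer occupies so that it can be
 * reserved: 9 MB when VGA emulation is active, otherwise the size of the
 * active viewport, or 0 if less than 8 MB of VRAM would remain.
 */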
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

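/*
 * Software init: set up the VM hubs, determine VRAM width and type,
 * choose the VM size and page table depth per ASIC, register the fault
 * and ECC interrupt sources, and bring up the memory manager and GART.
 */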
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev, &vram_width, &vram_type);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM (as
		 * on RAVEN) and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->umc.ras_if) {
		struct ras_common_if *ras_if = adev->umc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP. */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};