xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c (revision c62d3cd0ddd629606a3830aa22e9dcc6c2a0d3bf)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <drm/drmP.h>
25 #include <drm/drm_cache.h>
26 #include "amdgpu.h"
27 #include "gmc_v6_0.h"
28 #include "amdgpu_ucode.h"
29 #include "amdgpu_gem.h"
30 
31 #include "bif/bif_3_0_d.h"
32 #include "bif/bif_3_0_sh_mask.h"
33 #include "oss/oss_1_0_d.h"
34 #include "oss/oss_1_0_sh_mask.h"
35 #include "gmc/gmc_6_0_d.h"
36 #include "gmc/gmc_6_0_sh_mask.h"
37 #include "dce/dce_6_0_d.h"
38 #include "dce/dce_6_0_sh_mask.h"
39 #include "si_enums.h"
40 
41 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
42 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
43 static int gmc_v6_0_wait_for_idle(void *handle);
44 
45 MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
46 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
47 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
48 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
49 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
50 
51 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
52 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
53 #define MC_SEQ_MISC0__MT__DDR2   0x20000000
54 #define MC_SEQ_MISC0__MT__GDDR3  0x30000000
55 #define MC_SEQ_MISC0__MT__GDDR4  0x40000000
56 #define MC_SEQ_MISC0__MT__GDDR5  0x50000000
57 #define MC_SEQ_MISC0__MT__HBM    0x60000000
58 #define MC_SEQ_MISC0__MT__DDR3   0xB0000000
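/* The memory type is reported in bits [31:28] of MC_SEQ_MISC0; the encodings
 * above are the ones the driver distinguishes.  gmc_v6_0_convert_vram_type()
 * below maps the raw field to an AMDGPU_VRAM_TYPE_* value.
 */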
59 
60 
61 static const u32 crtc_offsets[6] =
62 {
63 	SI_CRTC0_REGISTER_OFFSET,
64 	SI_CRTC1_REGISTER_OFFSET,
65 	SI_CRTC2_REGISTER_OFFSET,
66 	SI_CRTC3_REGISTER_OFFSET,
67 	SI_CRTC4_REGISTER_OFFSET,
68 	SI_CRTC5_REGISTER_OFFSET
69 };
70 
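/* Put the memory controller into blackout mode: CPU framebuffer access is
 * blocked via BIF_FB_EN and MC_SHARED_BLACKOUT_CNTL is set so the MC can be
 * safely reprogrammed or reset.  gmc_v6_0_mc_resume() below undoes this.
 */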
71 static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
72 {
73 	u32 blackout;
74 
75 	gmc_v6_0_wait_for_idle((void *)adev);
76 
77 	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
78 	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
79 		/* Block CPU access */
80 		WREG32(mmBIF_FB_EN, 0);
81 		/* blackout the MC */
82 		blackout = REG_SET_FIELD(blackout,
83 					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
84 		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
85 	}
86 	/* wait for the MC to settle */
87 	udelay(100);
88 
89 }
90 
91 static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
92 {
93 	u32 tmp;
94 
95 	/* unblackout the MC */
96 	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
97 	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
98 	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
99 	/* allow CPU access */
100 	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
101 	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
102 	WREG32(mmBIF_FB_EN, tmp);
103 }
104 
105 static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
106 {
107 	const char *chip_name;
108 	char fw_name[30];
109 	int err;
110 	bool is_58_fw = false;
111 
112 	DRM_DEBUG("\n");
113 
114 	switch (adev->asic_type) {
115 	case CHIP_TAHITI:
116 		chip_name = "tahiti";
117 		break;
118 	case CHIP_PITCAIRN:
119 		chip_name = "pitcairn";
120 		break;
121 	case CHIP_VERDE:
122 		chip_name = "verde";
123 		break;
124 	case CHIP_OLAND:
125 		chip_name = "oland";
126 		break;
127 	case CHIP_HAINAN:
128 		chip_name = "hainan";
129 		break;
130 	default: BUG();
131 	}
132 
133 	/* this memory configuration requires special firmware */
134 	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
135 		is_58_fw = true;
136 
137 	if (is_58_fw)
138 		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
139 	else
140 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
141 	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
142 	if (err)
143 		goto out;
144 
145 	err = amdgpu_ucode_validate(adev->gmc.fw);
146 
147 out:
148 	if (err) {
149 		dev_err(adev->dev,
150 		       "si_mc: Failed to load firmware \"%s\"\n",
151 		       fw_name);
152 		release_firmware(adev->gmc.fw);
153 		adev->gmc.fw = NULL;
154 	}
155 	return err;
156 }
157 
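/* Upload the MC (memory controller) microcode fetched in
 * gmc_v6_0_init_microcode().  The sequence is: reset the SEQ engine and make
 * it writable, program the IO debug register pairs, stream the ucode words
 * into MC_SEQ_SUP_PGM, restart the engine, then poll until
 * MC_SEQ_TRAIN_WAKEUP_CNTL reports memory training done for D0 and D1.
 */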
158 static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
159 {
160 	const __le32 *new_fw_data = NULL;
161 	u32 running;
162 	const __le32 *new_io_mc_regs = NULL;
163 	int i, regs_size, ucode_size;
164 	const struct mc_firmware_header_v1_0 *hdr;
165 
166 	if (!adev->gmc.fw)
167 		return -EINVAL;
168 
169 	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
170 
171 	amdgpu_ucode_print_mc_hdr(&hdr->header);
172 
173 	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
174 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
175 	new_io_mc_regs = (const __le32 *)
176 		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
177 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
178 	new_fw_data = (const __le32 *)
179 		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
180 
181 	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
182 
183 	if (running == 0) {
184 
185 		/* reset the engine and set to writable */
186 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
187 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
188 
189 		/* load mc io regs */
190 		for (i = 0; i < regs_size; i++) {
191 			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
192 			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
193 		}
194 		/* load the MC ucode */
195 		for (i = 0; i < ucode_size; i++) {
196 			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
197 		}
198 
199 		/* put the engine back into the active state */
200 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
201 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
202 		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
203 
204 		/* wait for training to complete */
205 		for (i = 0; i < adev->usec_timeout; i++) {
206 			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
207 				break;
208 			udelay(1);
209 		}
210 		for (i = 0; i < adev->usec_timeout; i++) {
211 			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
212 				break;
213 			udelay(1);
214 		}
215 
216 	}
217 
218 	return 0;
219 }
220 
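/* MC_VM_FB_LOCATION holds the framebuffer base in 16 MB units in its low
 * 16 bits, so shifting the field left by 24 yields the VRAM base address in
 * bytes against which the VRAM and GART apertures are then placed.
 */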
221 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
222 				       struct amdgpu_gmc *mc)
223 {
224 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
225 	base <<= 24;
226 
227 	amdgpu_device_vram_location(adev, &adev->gmc, base);
228 	amdgpu_device_gart_location(adev, mc);
229 }
230 
231 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
232 {
233 	int i, j;
234 
235 	/* Initialize HDP */
236 	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
237 		WREG32((0xb05 + j), 0x00000000);
238 		WREG32((0xb06 + j), 0x00000000);
239 		WREG32((0xb07 + j), 0x00000000);
240 		WREG32((0xb08 + j), 0x00000000);
241 		WREG32((0xb09 + j), 0x00000000);
242 	}
243 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
244 
245 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
246 		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
247 	}
248 
249 	if (adev->mode_info.num_crtc) {
250 		u32 tmp;
251 
252 		/* Lockout access through VGA aperture*/
253 		tmp = RREG32(mmVGA_HDP_CONTROL);
254 		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
255 		WREG32(mmVGA_HDP_CONTROL, tmp);
256 
257 		/* disable VGA render */
258 		tmp = RREG32(mmVGA_RENDER_CONTROL);
259 		tmp &= ~VGA_VSTATUS_CNTL;
260 		WREG32(mmVGA_RENDER_CONTROL, tmp);
261 	}
262 	/* Update configuration */
263 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
264 	       adev->gmc.vram_start >> 12);
265 	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
266 	       adev->gmc.vram_end >> 12);
267 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
268 	       adev->vram_scratch.gpu_addr >> 12);
269 	WREG32(mmMC_VM_AGP_BASE, 0);
270 	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
271 	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
272 
273 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
274 		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
275 	}
276 }
277 
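/* Derive the VRAM configuration from the MC registers: the effective bus
 * width in bits is the number of channels (MC_SHARED_CHMAP.NOOFCHAN, decoded
 * by the switch below) times the per-channel width from MC_ARB_RAMCFG; for
 * example, twelve 32-bit channels add up to a 384-bit interface.
 */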
278 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
279 {
280 
281 	u32 tmp;
282 	int chansize, numchan;
283 	int r;
284 
285 	tmp = RREG32(mmMC_ARB_RAMCFG);
286 	if (tmp & (1 << 11)) {
287 		chansize = 16;
288 	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
289 		chansize = 64;
290 	} else {
291 		chansize = 32;
292 	}
293 	tmp = RREG32(mmMC_SHARED_CHMAP);
294 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
295 	case 0:
296 	default:
297 		numchan = 1;
298 		break;
299 	case 1:
300 		numchan = 2;
301 		break;
302 	case 2:
303 		numchan = 4;
304 		break;
305 	case 3:
306 		numchan = 8;
307 		break;
308 	case 4:
309 		numchan = 3;
310 		break;
311 	case 5:
312 		numchan = 6;
313 		break;
314 	case 6:
315 		numchan = 10;
316 		break;
317 	case 7:
318 		numchan = 12;
319 		break;
320 	case 8:
321 		numchan = 16;
322 		break;
323 	}
324 	adev->gmc.vram_width = numchan * chansize;
325 	/* size in MB on si */
326 	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
327 	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
328 
329 	if (!(adev->flags & AMD_IS_APU)) {
330 		r = amdgpu_device_resize_fb_bar(adev);
331 		if (r)
332 			return r;
333 	}
334 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
335 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
336 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
337 
338 	/* set the gart size */
339 	if (amdgpu_gart_size == -1) {
340 		switch (adev->asic_type) {
341 		case CHIP_HAINAN:    /* no MM engines */
342 		default:
343 			adev->gmc.gart_size = 256ULL << 20;
344 			break;
345 		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
346 		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
347 		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
348 		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
349 			adev->gmc.gart_size = 1024ULL << 20;
350 			break;
351 		}
352 	} else {
353 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
354 	}
355 
356 	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
357 
358 	return 0;
359 }
360 
361 static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
362 {
363 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
364 }
365 
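/* Ring version of the TLB flush: the page directory base registers for
 * VMIDs 0-7 and 8-15 live in two contiguous banks, the base address is
 * programmed in 4 KB units (pd_addr >> 12), and writing bit 'vmid' of
 * VM_INVALIDATE_REQUEST invalidates that VM's TLB entries.
 */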
366 static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
367 					    unsigned vmid, uint64_t pd_addr)
368 {
369 	uint32_t reg;
370 
371 	/* write new base address */
372 	if (vmid < 8)
373 		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
374 	else
375 		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
376 	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
377 
378 	/* bits 0-15 are the VM contexts0-15 */
379 	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
380 
381 	return pd_addr;
382 }
383 
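/* Each GART/page table entry is 8 bytes: the 4 KB-aligned physical address
 * ORed with the AMDGPU_PTE_* flags, written through the CPU mapping of the
 * table with a single writeq().
 */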
384 static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
385 				uint32_t gpu_page_idx, uint64_t addr,
386 				uint64_t flags)
387 {
388 	void __iomem *ptr = (void *)cpu_pt_addr;
389 	uint64_t value;
390 
391 	value = addr & 0xFFFFFFFFFFFFF000ULL;
392 	value |= flags;
393 	writeq(value, ptr + (gpu_page_idx * 8));
394 
395 	return 0;
396 }
397 
398 static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
399 					  uint32_t flags)
400 {
401 	uint64_t pte_flag = 0;
402 
403 	if (flags & AMDGPU_VM_PAGE_READABLE)
404 		pte_flag |= AMDGPU_PTE_READABLE;
405 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
406 		pte_flag |= AMDGPU_PTE_WRITEABLE;
407 	if (flags & AMDGPU_VM_PAGE_PRT)
408 		pte_flag |= AMDGPU_PTE_PRT;
409 
410 	return pte_flag;
411 }
412 
413 static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
414 				uint64_t *addr, uint64_t *flags)
415 {
416 	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
417 }
418 
419 static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
420 					      bool value)
421 {
422 	u32 tmp;
423 
424 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
425 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
426 			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
427 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
428 			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
429 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
430 			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
431 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
432 			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
433 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
434 			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
435 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
436 			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
437 	WREG32(mmVM_CONTEXT1_CNTL, tmp);
438 }
439 
440 /**
441  * gmc_v6_0_set_prt - set PRT VM fault
442  *
443  * @adev: amdgpu_device pointer
444  * @enable: enable/disable VM fault handling for PRT
445  */
446 static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
447 {
448 	u32 tmp;
449 
450 	if (enable && !adev->gmc.prt_warning) {
451 		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
452 		adev->gmc.prt_warning = true;
453 	}
454 
455 	tmp = RREG32(mmVM_PRT_CNTL);
456 	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
457 			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
458 			    enable);
459 	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
460 			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
461 			    enable);
462 	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
463 			    L2_CACHE_STORE_INVALID_ENTRIES,
464 			    enable);
465 	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
466 			    L1_TLB_STORE_INVALID_ENTRIES,
467 			    enable);
468 	WREG32(mmVM_PRT_CNTL, tmp);
469 
470 	if (enable) {
471 		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
472 		uint32_t high = adev->vm_manager.max_pfn -
473 			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
474 
475 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
476 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
477 		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
478 		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
479 		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
480 		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
481 		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
482 		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
483 	} else {
484 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
485 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
486 		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
487 		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
488 		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
489 		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
490 		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
491 		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
492 	}
493 }
494 
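/* Bring up the GART: pin the page table in VRAM, configure the L1 TLB and
 * L2 page table cache, point context 0 at the GART aperture, park contexts
 * 1-15 on the same table until real per-process page tables are assigned,
 * and select the fault behaviour requested by amdgpu_vm_fault_stop.
 */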
495 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
496 {
497 	int r, i;
498 	u32 field;
499 
500 	if (adev->gart.robj == NULL) {
501 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
502 		return -EINVAL;
503 	}
504 	r = amdgpu_gart_table_vram_pin(adev);
505 	if (r)
506 		return r;
507 	/* Setup TLB control */
508 	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
509 	       (0xA << 7) |
510 	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
511 	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
512 	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
513 	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
514 	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
515 	/* Setup L2 cache */
516 	WREG32(mmVM_L2_CNTL,
517 	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
518 	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
519 	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
520 	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
521 	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
522 	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
523 	WREG32(mmVM_L2_CNTL2,
524 	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
525 	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
526 
527 	field = adev->vm_manager.fragment_size;
528 	WREG32(mmVM_L2_CNTL3,
529 	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
530 	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
531 	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
532 	/* setup context0 */
533 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
534 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
535 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
536 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
537 			(u32)(adev->dummy_page_addr >> 12));
538 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
539 	WREG32(mmVM_CONTEXT0_CNTL,
540 	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
541 	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
542 	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
543 
544 	WREG32(0x575, 0);
545 	WREG32(0x576, 0);
546 	WREG32(0x577, 0);
547 
548 	/* empty context1-15 */
549 	/* set vm size, must be a multiple of 4 */
550 	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
551 	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
552 	/* Assign the pt base to something valid for now; the pts used for
553 	 * the VMs are determined by the application and set up and assigned
554 	 * on the fly by the amdgpu VM code.
555 	 */
556 	for (i = 1; i < 16; i++) {
557 		if (i < 8)
558 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
559 			       adev->gart.table_addr >> 12);
560 		else
561 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
562 			       adev->gart.table_addr >> 12);
563 	}
564 
565 	/* enable context1-15 */
566 	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
567 	       (u32)(adev->dummy_page_addr >> 12));
568 	WREG32(mmVM_CONTEXT1_CNTL2, 4);
569 	WREG32(mmVM_CONTEXT1_CNTL,
570 	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
571 	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
572 	       ((adev->vm_manager.block_size - 9)
573 	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
574 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
575 		gmc_v6_0_set_fault_enable_default(adev, false);
576 	else
577 		gmc_v6_0_set_fault_enable_default(adev, true);
578 
579 	gmc_v6_0_flush_gpu_tlb(adev, 0);
580 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
581 		 (unsigned)(adev->gmc.gart_size >> 20),
582 		 (unsigned long long)adev->gart.table_addr);
583 	adev->gart.ready = true;
584 	return 0;
585 }
586 
587 static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
588 {
589 	int r;
590 
591 	if (adev->gart.robj) {
592 		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
593 		return 0;
594 	}
595 	r = amdgpu_gart_init(adev);
596 	if (r)
597 		return r;
598 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
599 	adev->gart.gart_pte_flags = 0;
600 	return amdgpu_gart_table_vram_alloc(adev);
601 }
602 
603 static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
604 {
605 	/*unsigned i;
606 
607 	for (i = 1; i < 16; ++i) {
608 		uint32_t reg;
609 		if (i < 8)
610 			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
611 		else
612 			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
613 		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
614 	}*/
615 
616 	/* Disable all tables */
617 	WREG32(mmVM_CONTEXT0_CNTL, 0);
618 	WREG32(mmVM_CONTEXT1_CNTL, 0);
619 	/* Setup TLB control */
620 	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
621 	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
622 	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
623 	/* Setup L2 cache */
624 	WREG32(mmVM_L2_CNTL,
625 	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
626 	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
627 	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
628 	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
629 	WREG32(mmVM_L2_CNTL2, 0);
630 	WREG32(mmVM_L2_CNTL3,
631 	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
632 	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
633 	amdgpu_gart_table_vram_unpin(adev);
634 }
635 
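/* Decode a VM protection fault: the VMID, protection bits and memory client
 * id come from VM_CONTEXT1_PROTECTION_FAULT_STATUS, while mc_client is a
 * four-character ASCII tag packed into a u32 that is unpacked into 'block'
 * for the log message.
 */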
636 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
637 				     u32 status, u32 addr, u32 mc_client)
638 {
639 	u32 mc_id;
640 	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
641 	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
642 					PROTECTIONS);
643 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
644 		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
645 
646 	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
647 			      MEMORY_CLIENT_ID);
648 
649 	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
650 	       protections, vmid, addr,
651 	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
652 			     MEMORY_CLIENT_RW) ?
653 	       "write" : "read", block, mc_client, mc_id);
654 }
655 
656 /*
657 static const u32 mc_cg_registers[] = {
658 	MC_HUB_MISC_HUB_CG,
659 	MC_HUB_MISC_SIP_CG,
660 	MC_HUB_MISC_VM_CG,
661 	MC_XPB_CLK_GAT,
662 	ATC_MISC_CG,
663 	MC_CITF_MISC_WR_CG,
664 	MC_CITF_MISC_RD_CG,
665 	MC_CITF_MISC_VM_CG,
666 	VM_L2_CG,
667 };
668 
669 static const u32 mc_cg_ls_en[] = {
670 	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
671 	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
672 	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
673 	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
674 	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
675 	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
676 	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
677 	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
678 	VM_L2_CG__MEM_LS_ENABLE_MASK,
679 };
680 
681 static const u32 mc_cg_en[] = {
682 	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
683 	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
684 	MC_HUB_MISC_VM_CG__ENABLE_MASK,
685 	MC_XPB_CLK_GAT__ENABLE_MASK,
686 	ATC_MISC_CG__ENABLE_MASK,
687 	MC_CITF_MISC_WR_CG__ENABLE_MASK,
688 	MC_CITF_MISC_RD_CG__ENABLE_MASK,
689 	MC_CITF_MISC_VM_CG__ENABLE_MASK,
690 	VM_L2_CG__ENABLE_MASK,
691 };
692 
693 static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
694 				  bool enable)
695 {
696 	int i;
697 	u32 orig, data;
698 
699 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
700 		orig = data = RREG32(mc_cg_registers[i]);
701 		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
702 			data |= mc_cg_ls_en[i];
703 		else
704 			data &= ~mc_cg_ls_en[i];
705 		if (data != orig)
706 			WREG32(mc_cg_registers[i], data);
707 	}
708 }
709 
710 static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
711 				    bool enable)
712 {
713 	int i;
714 	u32 orig, data;
715 
716 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
717 		orig = data = RREG32(mc_cg_registers[i]);
718 		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
719 			data |= mc_cg_en[i];
720 		else
721 			data &= ~mc_cg_en[i];
722 		if (data != orig)
723 			WREG32(mc_cg_registers[i], data);
724 	}
725 }
726 
727 static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
728 				     bool enable)
729 {
730 	u32 orig, data;
731 
732 	orig = data = RREG32_PCIE(ixPCIE_CNTL2);
733 
734 	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
735 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
736 		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
737 		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
738 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
739 	} else {
740 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
741 		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
742 		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
743 		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
744 	}
745 
746 	if (orig != data)
747 		WREG32_PCIE(ixPCIE_CNTL2, data);
748 }
749 
750 static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
751 				     bool enable)
752 {
753 	u32 orig, data;
754 
755 	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
756 
757 	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
758 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
759 	else
760 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
761 
762 	if (orig != data)
763 		WREG32(mmHDP_HOST_PATH_CNTL, data);
764 }
765 
766 static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
767 				   bool enable)
768 {
769 	u32 orig, data;
770 
771 	orig = data = RREG32(mmHDP_MEM_POWER_LS);
772 
773 	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
774 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
775 	else
776 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
777 
778 	if (orig != data)
779 		WREG32(mmHDP_MEM_POWER_LS, data);
780 }
781 */
782 
783 static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
784 {
785 	switch (mc_seq_vram_type) {
786 	case MC_SEQ_MISC0__MT__GDDR1:
787 		return AMDGPU_VRAM_TYPE_GDDR1;
788 	case MC_SEQ_MISC0__MT__DDR2:
789 		return AMDGPU_VRAM_TYPE_DDR2;
790 	case MC_SEQ_MISC0__MT__GDDR3:
791 		return AMDGPU_VRAM_TYPE_GDDR3;
792 	case MC_SEQ_MISC0__MT__GDDR4:
793 		return AMDGPU_VRAM_TYPE_GDDR4;
794 	case MC_SEQ_MISC0__MT__GDDR5:
795 		return AMDGPU_VRAM_TYPE_GDDR5;
796 	case MC_SEQ_MISC0__MT__DDR3:
797 		return AMDGPU_VRAM_TYPE_DDR3;
798 	default:
799 		return AMDGPU_VRAM_TYPE_UNKNOWN;
800 	}
801 }
802 
803 static int gmc_v6_0_early_init(void *handle)
804 {
805 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
806 
807 	gmc_v6_0_set_gmc_funcs(adev);
808 	gmc_v6_0_set_irq_funcs(adev);
809 
810 	return 0;
811 }
812 
813 static int gmc_v6_0_late_init(void *handle)
814 {
815 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
816 
817 	amdgpu_bo_late_init(adev);
818 
819 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
820 		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
821 	else
822 		return 0;
823 }
824 
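/* Size of the framebuffer the VBIOS set up before the driver loaded: 9 MB
 * when VGA mode is enabled (8 MB for the VGA emulator plus 1 MB of FB),
 * otherwise the active scanout size, i.e. viewport width * height * 4 bytes
 * per pixel.
 */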
825 static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
826 {
827 	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
828 	unsigned size;
829 
830 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
831 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
832 	} else {
833 		u32 viewport = RREG32(mmVIEWPORT_SIZE);
834 		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
835 			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
836 			4);
837 	}
838 	/* return 0 if the pre-OS buffer uses up most of vram */
839 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
840 		return 0;
841 	return size;
842 }
843 
844 static int gmc_v6_0_sw_init(void *handle)
845 {
846 	int r;
847 	int dma_bits;
848 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849 
850 	if (adev->flags & AMD_IS_APU) {
851 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
852 	} else {
853 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
854 		tmp &= MC_SEQ_MISC0__MT__MASK;
855 		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
856 	}
857 
858 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
859 	if (r)
860 		return r;
861 
862 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
863 	if (r)
864 		return r;
865 
866 	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
867 
868 	adev->gmc.mc_mask = 0xffffffffffULL;
869 
870 	adev->need_dma32 = false;
871 	dma_bits = adev->need_dma32 ? 32 : 40;
872 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
873 	if (r) {
874 		adev->need_dma32 = true;
875 		dma_bits = 32;
876 		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
877 	}
878 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
879 	if (r) {
880 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
881 		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
882 	}
883 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
884 
885 	r = gmc_v6_0_init_microcode(adev);
886 	if (r) {
887 		dev_err(adev->dev, "Failed to load mc firmware!\n");
888 		return r;
889 	}
890 
891 	r = gmc_v6_0_mc_init(adev);
892 	if (r)
893 		return r;
894 
895 	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
896 
897 	r = amdgpu_bo_init(adev);
898 	if (r)
899 		return r;
900 
901 	r = gmc_v6_0_gart_init(adev);
902 	if (r)
903 		return r;
904 
905 	/*
906 	 * number of VMs
907 	 * VMID 0 is reserved for System
908 	 * amdgpu graphics/compute will use VMIDs 1-7
909 	 * amdkfd will use VMIDs 8-15
910 	 */
911 	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
912 	amdgpu_vm_manager_init(adev);
913 
914 	/* base offset of vram pages */
915 	if (adev->flags & AMD_IS_APU) {
916 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
917 
918 		tmp <<= 22;
919 		adev->vm_manager.vram_base_offset = tmp;
920 	} else {
921 		adev->vm_manager.vram_base_offset = 0;
922 	}
923 
924 	return 0;
925 }
926 
927 static int gmc_v6_0_sw_fini(void *handle)
928 {
929 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
930 
931 	amdgpu_gem_force_release(adev);
932 	amdgpu_vm_manager_fini(adev);
933 	amdgpu_gart_table_vram_free(adev);
934 	amdgpu_bo_fini(adev);
935 	amdgpu_gart_fini(adev);
936 	release_firmware(adev->gmc.fw);
937 	adev->gmc.fw = NULL;
938 
939 	return 0;
940 }
941 
942 static int gmc_v6_0_hw_init(void *handle)
943 {
944 	int r;
945 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
946 
947 	gmc_v6_0_mc_program(adev);
948 
949 	if (!(adev->flags & AMD_IS_APU)) {
950 		r = gmc_v6_0_mc_load_microcode(adev);
951 		if (r) {
952 			dev_err(adev->dev, "Failed to load MC firmware!\n");
953 			return r;
954 		}
955 	}
956 
957 	r = gmc_v6_0_gart_enable(adev);
958 	if (r)
959 		return r;
960 
961 	return r;
962 }
963 
964 static int gmc_v6_0_hw_fini(void *handle)
965 {
966 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
967 
968 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
969 	gmc_v6_0_gart_disable(adev);
970 
971 	return 0;
972 }
973 
974 static int gmc_v6_0_suspend(void *handle)
975 {
976 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
977 
978 	gmc_v6_0_hw_fini(adev);
979 
980 	return 0;
981 }
982 
983 static int gmc_v6_0_resume(void *handle)
984 {
985 	int r;
986 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
987 
988 	r = gmc_v6_0_hw_init(adev);
989 	if (r)
990 		return r;
991 
992 	amdgpu_vmid_reset_all(adev);
993 
994 	return 0;
995 }
996 
997 static bool gmc_v6_0_is_idle(void *handle)
998 {
999 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1000 	u32 tmp = RREG32(mmSRBM_STATUS);
1001 
1002 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1003 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
1004 		return false;
1005 
1006 	return true;
1007 }
1008 
1009 static int gmc_v6_0_wait_for_idle(void *handle)
1010 {
1011 	unsigned i;
1012 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1013 
1014 	for (i = 0; i < adev->usec_timeout; i++) {
1015 		if (gmc_v6_0_is_idle(handle))
1016 			return 0;
1017 		udelay(1);
1018 	}
1019 	return -ETIMEDOUT;
1020 
1021 }
1022 
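/* Soft reset path: if SRBM_STATUS shows the VM or memory controller blocks
 * busy, stop the MC, pulse the corresponding SRBM_SOFT_RESET bits with short
 * delays around them, and then resume the MC.
 */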
1023 static int gmc_v6_0_soft_reset(void *handle)
1024 {
1025 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1026 	u32 srbm_soft_reset = 0;
1027 	u32 tmp = RREG32(mmSRBM_STATUS);
1028 
1029 	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1030 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1031 						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1032 
1033 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1034 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1035 		if (!(adev->flags & AMD_IS_APU))
1036 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1037 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1038 	}
1039 
1040 	if (srbm_soft_reset) {
1041 		gmc_v6_0_mc_stop(adev);
1042 		if (gmc_v6_0_wait_for_idle(adev)) {
1043 			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
1044 		}
1045 
1046 
1047 		tmp = RREG32(mmSRBM_SOFT_RESET);
1048 		tmp |= srbm_soft_reset;
1049 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1050 		WREG32(mmSRBM_SOFT_RESET, tmp);
1051 		tmp = RREG32(mmSRBM_SOFT_RESET);
1052 
1053 		udelay(50);
1054 
1055 		tmp &= ~srbm_soft_reset;
1056 		WREG32(mmSRBM_SOFT_RESET, tmp);
1057 		tmp = RREG32(mmSRBM_SOFT_RESET);
1058 
1059 		udelay(50);
1060 
1061 		gmc_v6_0_mc_resume(adev);
1062 		udelay(50);
1063 	}
1064 
1065 	return 0;
1066 }
1067 
1068 static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
1069 					     struct amdgpu_irq_src *src,
1070 					     unsigned type,
1071 					     enum amdgpu_interrupt_state state)
1072 {
1073 	u32 tmp;
1074 	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1075 		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1076 		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1077 		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1078 		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1079 		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1080 
1081 	switch (state) {
1082 	case AMDGPU_IRQ_STATE_DISABLE:
1083 		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1084 		tmp &= ~bits;
1085 		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1086 		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1087 		tmp &= ~bits;
1088 		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1089 		break;
1090 	case AMDGPU_IRQ_STATE_ENABLE:
1091 		tmp = RREG32(mmVM_CONTEXT0_CNTL);
1092 		tmp |= bits;
1093 		WREG32(mmVM_CONTEXT0_CNTL, tmp);
1094 		tmp = RREG32(mmVM_CONTEXT1_CNTL);
1095 		tmp |= bits;
1096 		WREG32(mmVM_CONTEXT1_CNTL, tmp);
1097 		break;
1098 	default:
1099 		break;
1100 	}
1101 
1102 	return 0;
1103 }
1104 
1105 static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
1106 				      struct amdgpu_irq_src *source,
1107 				      struct amdgpu_iv_entry *entry)
1108 {
1109 	u32 addr, status;
1110 
1111 	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1112 	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1113 	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1114 
1115 	if (!addr && !status)
1116 		return 0;
1117 
1118 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1119 		gmc_v6_0_set_fault_enable_default(adev, false);
1120 
1121 	if (printk_ratelimit()) {
1122 		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1123 			entry->src_id, entry->src_data[0]);
1124 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1125 			addr);
1126 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1127 			status);
1128 		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
1129 	}
1130 
1131 	return 0;
1132 }
1133 
1134 static int gmc_v6_0_set_clockgating_state(void *handle,
1135 					  enum amd_clockgating_state state)
1136 {
1137 	return 0;
1138 }
1139 
1140 static int gmc_v6_0_set_powergating_state(void *handle,
1141 					  enum amd_powergating_state state)
1142 {
1143 	return 0;
1144 }
1145 
1146 static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
1147 	.name = "gmc_v6_0",
1148 	.early_init = gmc_v6_0_early_init,
1149 	.late_init = gmc_v6_0_late_init,
1150 	.sw_init = gmc_v6_0_sw_init,
1151 	.sw_fini = gmc_v6_0_sw_fini,
1152 	.hw_init = gmc_v6_0_hw_init,
1153 	.hw_fini = gmc_v6_0_hw_fini,
1154 	.suspend = gmc_v6_0_suspend,
1155 	.resume = gmc_v6_0_resume,
1156 	.is_idle = gmc_v6_0_is_idle,
1157 	.wait_for_idle = gmc_v6_0_wait_for_idle,
1158 	.soft_reset = gmc_v6_0_soft_reset,
1159 	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
1160 	.set_powergating_state = gmc_v6_0_set_powergating_state,
1161 };
1162 
1163 static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
1164 	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
1165 	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
1166 	.set_pte_pde = gmc_v6_0_set_pte_pde,
1167 	.set_prt = gmc_v6_0_set_prt,
1168 	.get_vm_pde = gmc_v6_0_get_vm_pde,
1169 	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
1170 };
1171 
1172 static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
1173 	.set = gmc_v6_0_vm_fault_interrupt_state,
1174 	.process = gmc_v6_0_process_interrupt,
1175 };
1176 
1177 static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
1178 {
1179 	if (adev->gmc.gmc_funcs == NULL)
1180 		adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
1181 }
1182 
1183 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
1184 {
1185 	adev->gmc.vm_fault.num_types = 1;
1186 	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
1187 }
1188 
1189 const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
1190 {
1191 	.type = AMD_IP_BLOCK_TYPE_GMC,
1192 	.major = 6,
1193 	.minor = 0,
1194 	.rev = 0,
1195 	.funcs = &gmc_v6_0_ip_funcs,
1196 };
1197