/* xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c (revision ac8b6f14) */
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK   0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
#define MC_SEQ_MISC0__MT__DDR2   0x20000000
#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
#define MC_SEQ_MISC0__MT__HBM    0x60000000
#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
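/* bits 31:28 of MC_SEQ_MISC0 encode the VRAM type; see gmc_v6_0_convert_vram_type() below */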


static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);

}

static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default: BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		dev_err(adev->dev,
		       "si_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
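	/* the io_debug blob is consumed below as (index, data) dword pairs, hence the / (4 * 2) */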
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}

	}

	return 0;
}

static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
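	/* bits 15:0 of MC_VM_FB_LOCATION give the FB base in 16 MiB (1 << 24) units */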
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
}

static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{

	u32 tmp;
	int chansize, numchan;
	int r;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11)) {
		chansize = 16;
	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
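	/* total memory bus width in bits: channel count times per-channel width */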
	adev->gmc.vram_width = numchan * chansize;
	/* size in MB on si */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_HAINAN:    /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

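	/* VM contexts 0-7 and 8-15 keep their page table base registers in two separate banks */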
	/* write new base address */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts 0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

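	/*
	 * A GMC v6 PTE is 64 bits: the 4 KiB-aligned physical address in the
	 * upper bits and the valid/permission flags in the low 12 bits.
	 */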
	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
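	/*
	 * SI has a single page table level, so the PDE address is used as-is;
	 * just sanity-check 4 KiB alignment and the 40-bit MC address limit.
	 */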
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

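	/*
	 * Point the four PRT apertures at the whole VA range except the
	 * reserved window at either end (addresses are in GPU page units).
	 */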
	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

	field = adev->vm_manager.fragment_size;
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
	       << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
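	/* one 64-bit PTE per GART page */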
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/*unsigned i;

	for (i = 1; i < 16; ++i) {
		uint32_t reg;
		if (i < 8)
			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
		else
			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
		adev->vm_manager.saved_table_addr[i] = RREG32(reg);
	}*/

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	amdgpu_gart_table_vram_unpin(adev);
}

static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
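	/* mc_client packs the client name as four ASCII characters, most significant byte first */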
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gmc_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
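		/* estimate the active scanout footprint, assuming 32 bits per pixel */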
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;
	return size;
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

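	/* the SI MC decodes a 40-bit (1 TiB) internal address space */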
	adev->gmc.mc_mask = 0xffffffffffULL;

	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gmc_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;

}

static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
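		/* quiesce the MC, pulse the selected reset bits (the read-backs post the writes), then resume */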
		gmc_v6_0_mc_stop(adev);
		if (gmc_v6_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
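	/* writing bit 0 of VM_CONTEXT1_CNTL2 clears the latched fault address and status */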
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_set_pte_pde,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};