/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

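/**
 * gmc_v8_0_init_golden_registers - program chip specific golden registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the golden register settings and MC clockgating tables
 * for the detected ASIC (VI).
 */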
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

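/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Block CPU framebuffer access and put the memory controller into
 * blackout mode so it can safely be reprogrammed (VI).
 */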
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

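/**
 * gmc_v8_0_mc_resume - resume the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the memory controller out of blackout mode and re-enable
 * CPU framebuffer access (VI).
 */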
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
			chip_name = "polaris12_k";
		} else {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
			/* Polaris12 32bit ASIC needs a special MC firmware */
			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
				chip_name = "polaris12_32";
			else
				chip_name = "polaris12";
		}
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * The vbios does this for us in asic_init in that case.
	 * Likewise, skip it on a VF, since the hypervisor loads the
	 * MC ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

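/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */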
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * The vbios does this for us in asic_init in that case.
	 * Likewise, skip it on a VF, since the hypervisor loads the
	 * MC ucode for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

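/**
 * gmc_v8_0_vram_gtt_location - try to find VRAM & GTT location
 *
 * @adev: amdgpu_device pointer
 * @mc: amdgpu_gmc structure holding the MC address ranges
 *
 * Place VRAM at the base programmed by the vbios (zero on virtual
 * functions) and let the common helpers place the GART within the
 * remaining address space (VI).
 */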
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;
	u32 tmp;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	tmp = RREG32(mmCONFIG_MEMSIZE);
	/* some boards may have garbage in the upper 16 bits */
	if (tmp & 0xffff0000) {
		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffff)
			tmp &= 0xffff;
	}
	adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

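/**
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from within a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for @vmid and emit a TLB
 * invalidation request on the ring (VI).
 */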
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

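/**
 * gmc_v8_0_emit_pasid_mapping - update the VMID to PASID mapping
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance
 * @pasid: pasid to map @vmid to
 *
 * Update the IH lookup table used to translate VMIDs back to
 * PASIDs when handling interrupts (VI).
 */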
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */

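/*
 * On VI a PDE is essentially just the 4k aligned physical address of the
 * next level page table, so there is nothing to add to the address or
 * flags here; only sanity check the alignment of the address.
 */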
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

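/*
 * Build the PTE flags for a mapping: only the executable bit is taken
 * over from the request, and the software PRT flag is dropped since
 * PRT is handled through the PRT apertures on VI.
 */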
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME: start with 4GB; once a two level page table is in use,
	 * switch to the full VM size space.
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

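/**
 * gmc_v8_0_gart_init - gart table VRAM alloc
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for the GART page table (VI).
 * Returns 0 for success, error for failure.
 */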
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, pasid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

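/**
 * gmc_v8_0_convert_vram_type - convert the MC memory type to a vram type
 *
 * @mc_seq_vram_type: memory type field as read from MC_SEQ_MISC0
 *
 * Translate the hardware memory type encoding into the matching
 * AMDGPU_VRAM_TYPE_* value (VI).
 */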
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

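/**
 * gmc_v8_0_get_vbios_fb_size - compute the vbios reserved framebuffer size
 *
 * @adev: amdgpu_device pointer
 *
 * Return the fixed VGA allocation if VGA mode is enabled, otherwise
 * derive the size from the active viewport at 4 bytes per pixel.
 */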
static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * The default is 64GB of GPUVM address space.
	 * The maximum GPUVM size on VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
			adev->asic_type == CHIP_POLARIS10 ||
			adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* poll the MC busy bits in SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	}

	adev->gmc.srbm_soft_reset = 0;
	return false;
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset, tmp;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	tmp = RREG32(mmSRBM_SOFT_RESET);
	tmp |= srbm_soft_reset;
	dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);

	udelay(50);

	tmp &= ~srbm_soft_reset;
	WREG32(mmSRBM_SOFT_RESET, tmp);
	tmp = RREG32(mmSRBM_SOFT_RESET);

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

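/**
 * gmc_v8_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source structure
 * @type: interrupt type
 * @state: interrupt state to program
 *
 * Toggle the protection fault interrupt enable bits in the VM
 * context registers (VI).
 */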
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

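/**
 * gmc_v8_0_process_interrupt - handle a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source structure
 * @entry: interrupt vector entry
 *
 * Read and clear the fault registers, print a decoded fault
 * message, and record the fault info for KFD VMIDs (VI).
 */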
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

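/*
 * Toggle the medium grain clock gating enable bits in the MC hub, XPB,
 * ATC, CITF and VM L2 blocks, honoring the AMD_CG_SUPPORT_MC_MGCG flag
 * (Fiji).
 */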
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

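/*
 * Toggle the memory light sleep enable bits in the MC hub, XPB, ATC,
 * CITF and VM L2 blocks, honoring the AMD_CG_SUPPORT_MC_LS flag (Fiji).
 */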
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};
1785