xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c (revision c62d3cd0ddd629606a3830aa22e9dcc6c2a0d3bf)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");

static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

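/**
 * gmc_v8_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then block CPU access to the
 * framebuffer and put the MC into blackout mode so it can be
 * safely reprogrammed.
 */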
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

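/**
 * gmc_v8_0_mc_resume - restart the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU read and
 * write access to the framebuffer.
 */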
static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

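/**
 * gmc_v8_0_polaris_mc_load_microcode - load polaris MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Skipped on SR-IOV capable boards and when the vbios reports a
 * zero MC firmware version in the MC_SEQ_IO_DEBUG scratch area.
 * Returns 0 on success, error on failure.
 */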
static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data, vbios_version;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do
	 * that for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
	data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
	vbios_version = data & 0xf;

	if (vbios_version == 0)
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

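/**
 * gmc_v8_0_vram_gtt_location - place VRAM and GTT
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller pointer
 *
 * Read the VRAM base from MC_VM_FB_LOCATION (zero on virtual
 * functions, where the host owns that register) and place VRAM
 * and GART in the GPU's physical address space.
 */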
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_device_vram_location(adev, &adev->gmc, base);
	amdgpu_device_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

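/**
 * gmc_v8_0_emit_flush_gpu_tlb - flush the TLB from a ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to flush
 * @pasid: unused on VI
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for @vmid and flush its TLB
 * using register writes emitted on the ring instead of MMIO.
 */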
static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

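/**
 * gmc_v8_0_emit_pasid_mapping - write a VMID-PASID mapping
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vm instance to map
 * @pasid: process address space ID
 *
 * Record the PASID for @vmid in the IH VMID lookup table so that
 * later faults on this VMID can be attributed to a process.
 */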
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/**
 * gmc_v8_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
				uint32_t gpu_page_idx, uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

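/**
 * gmc_v8_0_get_vm_pte_flags - convert mapping flags to hw PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* per-mapping flags
 *
 * Translate the generic mapping flags into the VI PTE bits
 * (executable, readable, writeable, PRT).
 */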
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

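/**
 * gmc_v8_0_get_vm_pde - adjust a PDE for the hw
 *
 * @adev: amdgpu_device pointer
 * @level: page table level
 * @addr: PDE address
 * @flags: PDE flags
 *
 * No translation is needed on VI; just assert that the address
 * only uses the bits a PDE may carry.
 */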
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

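/**
 * gmc_v8_0_gart_init - initialize the gart table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the common gart structure and the page table in VRAM
 * (8 bytes per GPU page) and make GART pages executable by default.
 * Returns 0 for success, error for failure.
 */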
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: the process address space ID the fault belongs to
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, pasid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

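/**
 * gmc_v8_0_convert_vram_type - convert MC sequencer memory type
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Map the memory type reported by the MC sequencer onto the
 * generic AMDGPU_VRAM_TYPE_* values.
 */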
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

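/**
 * gmc_v8_0_get_vbios_fb_size - size of the pre-OS framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * Estimate how much VRAM the vbios is still scanning out of: a
 * fixed 9 MB when VGA emulation is enabled, otherwise the active
 * viewport at 4 bytes per pixel. Returns 0 if that buffer would
 * consume almost all of VRAM.
 */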
static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;
	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((16 << 20) 4k pages).
	 * Max GPUVM size for these chips is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}
	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
			adev->asic_type == CHIP_POLARIS10 ||
			adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

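/**
 * gmc_v8_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Set or clear the protection fault interrupt enable bits in the
 * system context (VM_CONTEXT0) and user VM (VM_CONTEXT1) control
 * registers.
 */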
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

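/**
 * gmc_v8_0_process_interrupt - handle a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Read and clear the fault address/status registers, print a
 * rate-limited human readable report and, for KFD VMIDs, stash
 * the fault details for amdkfd to consume.
 */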
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info = { 0 };

		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

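/**
 * fiji_update_mc_medium_grain_clock_gating - toggle MC MGCG
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable medium grain clock gating
 *
 * Set or clear the clock gating enable bit in each of the MC hub,
 * XPB, ATC, CITF and VM L2 clock gating registers.
 */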
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

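/**
 * fiji_update_mc_light_sleep - toggle MC memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable light sleep
 *
 * Set or clear the MEM_LS_ENABLE bit in the same set of MC clock
 * gating registers as the MGCG path above.
 */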
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v8_0_set_pte_pde,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v8_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};