/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

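/*
 * Golden register init: each table below is a list of
 * (register, AND mask, OR value) triplets applied by
 * amdgpu_device_program_register_sequence().
 */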
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris12_k";
		else
			chip_name = "polaris12";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
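	/* the io_debug array is a list of (index, data) register pairs,
	 * i.e. two dwords per entry
	 */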
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

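	/* if the MC ucode engine is already running (e.g. started by
	 * the vbios), don't reload it
	 */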
	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 * Skip MC ucode loading on VF, because the hypervisor will do that
	 * for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

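	/* bit 6 cleared here and bit 7 polled below appear to be
	 * undocumented MC_SEQ_MISC0 handshake bits for the ucode load
	 */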
	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

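	/* MC_VM_FB_LOCATION holds the FB base in bits 15:0 in units of
	 * 16MB, hence the mask and the shift by 24 below
	 */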
	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
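	/* the raw 0xb05..0xb09 offsets below appear to have no symbolic
	 * names in the VI register headers
	 */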
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
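		/* NOOFCHAN is an enumeration, not a plain channel count,
		 * so decode it with the table below
		 */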
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

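	/* scan the hardware VMID-to-PASID mappings and only invalidate
	 * the VM context currently bound to this pasid
	 */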
	for (vmid = 1; vmid < 16; vmid++) {
		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

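	/* the per-VMID page table base registers are split into two
	 * banks: contexts 0-7 and contexts 8-15
	 */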
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
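	/* PRT is handled via the VM_PRT aperture registers on VI, not
	 * via a PTE bit, so drop the software flag here
	 */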
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

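		/* cover the whole VM space with the four PRT apertures,
		 * excluding the reserved range at either end
		 */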
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
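	/* the hardware block size field is relative to the minimum
	 * 512-entry (2^9) page table, hence the -9
	 */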
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: pasid of the process which raised the fault
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, pasid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

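	/* the canonical shared (system) and private (GPU-only) apertures
	 * used for SVM address translation, 4GB each by convention
	 */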
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_late_init(adev);

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB by default ((1 << 24) 4k pages).
	 * Max GPUVM address space size for VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
			adev->asic_type == CHIP_POLARIS10 ||
			adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	return gmc_v8_0_gart_enable(adev);
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};