/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "mmhub_v1_8.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include "amdgpu_reset.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2                                                          0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2

#define MAX_MEM_RANGES 8

static const char *gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

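/*
 * The MMHUB client ID tables below are indexed as [client id][rw], where
 * rw is the RW field of VM_L2_PROTECTION_FAULT_STATUS (0 = read fault,
 * 1 = write fault), matching the lookups in gmc_v9_0_process_interrupt().
 */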
static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

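/*
 * Per-channel MCUMC control register addresses for the eight UMC
 * instances: consecutive instances are 0x40000 registers apart and the
 * four channels within an instance 0x800 apart (layout inferred from
 * the constants below).
 */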
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/*
	 * Devices newer than VEGA10/12 have these programming sequences
	 * performed by the PSP bootloader.
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

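	/* The low seven bits of each MCUMC control register gate the ECC
	 * error interrupts (mask assumed from the programming sequence below).
	 */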
	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

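	/* All the protection-fault interrupt enable bits in VM_CONTEXT*_CNTL;
	 * the 16 per-VMID context CNTL registers are consecutive, hence the
	 * "+ i" register addressing below.
	 */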
	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_SOC15_IP(GC, reg);

				tmp &= ~bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_SOC15_IP(GC, reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_SOC15_IP(GC, reg);

				tmp |= bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_SOC15_IP(GC, reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;
	uint32_t cam_index = 0;
	int ret;
	uint32_t node_id;
	/* signed: ih_node_to_logical_xcc() may return a negative error code */
	int xcc_id = 0;

	node_id = entry->node_id;

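	/* Assemble the 48-bit faulting GPU VA: src_data[0] carries address
	 * bits 43:12 and the low nibble of src_data[1] supplies bits 47:44.
	 */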
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB1(0)];
	} else {
		hub_name = "gfxhub0";
		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
				node_id);
			if (xcc_id < 0)
				xcc_id = 0;
		}
		hub = &adev->vmhub[xcc_id];
	}

	if (retry_fault) {
		if (adev->irq.retry_cam_enabled) {
			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			cam_index = entry->src_data[2] & 0x3ff;

			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						     addr, write_fault);
			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
			if (ret)
				return 1;
		} else {
			/* Process it only if it's the first fault for this address */
			if (entry->ih != &adev->irq.ih_soft &&
			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
				return 1;

			/* Delegate it to a different ring if the hardware hasn't
			 * already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			/* Try to handle the recoverable page faults by filling page
			 * tables
			 */
			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						   addr, write_fault))
				return 1;
		}
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->ip_versions[MMHUB_HWIP][0]) {
		case IP_VERSION(9, 0, 0):
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case IP_VERSION(9, 3, 0):
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case IP_VERSION(9, 4, 0):
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case IP_VERSION(9, 4, 1):
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 0):
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case IP_VERSION(1, 5, 0):
		case IP_VERSION(2, 4, 0):
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case IP_VERSION(1, 8, 0):
		case IP_VERSION(9, 4, 2):
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Return: true if the invalidation semaphore workaround should be used
 * for @vmhub.
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		return false;

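	/* Use the semaphore only on the MMHUBs, never under SR-IOV, and not
	 * on Picasso parts lacking the Raven2 rework (the workaround is
	 * apparently unnecessary there).
	 */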
	return ((vmhub == AMDGPU_MMHUB0(0) ||
		 vmhub == AMDGPU_MMHUB1(0)) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
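	/*
	 * Engine 17 is presumably reserved for these synchronous,
	 * register-based flushes; rings get their own invalidation engines
	 * assigned separately at init.
	 */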
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq[0].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_domain->sem);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. Acquire a semaphore before the invalidation
	 * and release it afterwards so we never enter a power-gated state
	 * mid-invalidation, working around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
		else
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB(0)) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
		else
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: KIQ instance to use for the flush
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub, uint32_t inst)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq[inst].ring_lock);
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}
		up_read(&adev->reset_domain->sem);
		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB(0), flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle. Acquire a semaphore before the invalidation
	 * and release it afterwards so we never enter a power-gated state
	 * mid-invalidation, working around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->vm_hub == AMDGPU_MMHUB1(0))
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
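/*
 * Example (illustrative only): a valid, snooped system page with read and
 * write permission at physical address 0x12345000 would be encoded as:
 *   0x12345000 | (1 << 6) | (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0)
 *   = 0x0000000012345067
 * using the PTE layout above.
 */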

static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE) {
			*flags &= ~AMDGPU_PDE_PTE;
			if (!(*flags & AMDGPU_PTE_VALID))
				*addr |= 1 << PAGE_SHIFT;
		} else {
			*flags |= AMDGPU_PTE_TF;
		}
	}
}

static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
					 struct amdgpu_bo *bo,
					 struct amdgpu_bo_va_mapping *mapping,
					 uint64_t *flags)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	unsigned int mtype;
	bool snoop = false;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		if (is_vram) {
			if (bo_adev == adev) {
				if (uncached)
					mtype = MTYPE_UC;
				else if (coherent)
					mtype = MTYPE_CC;
				else
					mtype = MTYPE_RW;
				/* FIXME: is this still needed? Or does
				 * amdgpu_ttm_tt_pde_flags already handle this?
				 */
				if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
				     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				if (uncached || coherent)
					mtype = MTYPE_UC;
				else
					mtype = MTYPE_NC;
				if (mapping->bo_va->is_xgmi)
					snoop = true;
			}
		} else {
			if (uncached || coherent)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
			/* FIXME: is this still needed? Or does
			 * amdgpu_ttm_tt_pde_flags already handle this?
			 */
			snoop = true;
		}
		break;
	case IP_VERSION(9, 4, 3):
		/* FIXME: Needs more work for handling multiple memory
		 * partitions (> NPS1 mode) e.g. NPS4 for both APU and dGPU
		 * modes.
		 */
		snoop = true;
		if (uncached) {
			mtype = MTYPE_UC;
		} else if (adev->gmc.is_app_apu) {
			/* FIXME: APU in native mode, NPS1 single socket only
			 *
			 * To support a NUMA-partitioned APU, e.g. in NPS4 mode,
			 * this needs to look at the NUMA node on which the
			 * system memory allocation was done.
			 *
			 * Memory access by a different partition within the same
			 * socket should be treated as remote access, so MTYPE_RW
			 * cannot always be used.
			 */
			mtype = MTYPE_RW;
		} else if (adev->flags & AMD_IS_APU) {
			/* APU in carveout mode */
			mtype = MTYPE_RW;
		} else {
			/* dGPU */
			if (is_vram && bo_adev == adev)
				mtype = MTYPE_RW;
			else if (is_vram)
				mtype = MTYPE_NC;
			else
				mtype = MTYPE_UC;
		}

		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;

		/* FIXME: is this still needed? Or does
		 * amdgpu_ttm_tt_pde_flags already handle this?
		 */
		if (!is_vram)
			snoop = true;
	}

	if (mtype != MTYPE_NC)
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			 AMDGPU_PTE_MTYPE_VG10(mtype);
	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->tbo.resource)
		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
					     mapping, flags);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

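		/* Estimate the pre-OS framebuffer as viewport width * height
		 * * 4 bytes per pixel (assuming a 32bpp scanout surface).
		 */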
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static enum amdgpu_memory_partition
gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
{
	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;

	if (adev->nbio.funcs->get_memory_partition_mode)
		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
								   supp_modes);

	return mode;
}

static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return AMDGPU_NPS1_PARTITION_MODE;

	return gmc_v9_0_get_memory_partition(adev, NULL);
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
	.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
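		/* Dies with an odd id use the "first" channel index table,
		 * even ones the "second" (mapping assumed from the naming).
		 */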
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
	else
		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
}

static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	/* is UMC the right IP to check for MCA?  Maybe DF? */
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu) {
			mca->mp0.ras = &mca_v3_0_mp0_ras;
			mca->mp1.ras = &mca_v3_0_mp1_ras;
			mca->mpio.ras = &mca_v3_0_mpio_ras;
		}
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras = &xgmi_ras;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
	 * in their IP discovery tables
	 */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		adev->gmc.xgmi.supported = true;

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
		enum amdgpu_pkg_type pkg_type =
			adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3 APUs there is no physical VRAM domain present,
		 * and the APU can be used in two possible modes:
		 *  - carveout mode
		 *  - native APU mode
		 * "is_app_apu" identifies an APU running in native mode.
		 */
		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
					!pci_resource_len(adev->pdev, 0));
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_ras_funcs(adev);
	gmc_v9_0_set_xgmi_ras_funcs(adev);

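	/* 4 GB shared (SVM) and private (scratch) apertures, per the SOC15
	 * virtual address map (values assumed fixed for all GMC v9 parts).
	 */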
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Work around a performance drop issue observed when the VBIOS
	 * enables partial writes while disabling HBM ECC on Vega10.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);

		if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
		    adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* The memsize register reports VRAM size in MB */
	if (!adev->gmc.is_app_apu) {
		adev->gmc.mc_vram_size =
			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	} else {
		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
		adev->gmc.mc_vram_size = 0;
	}
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * An AMD Accelerated Processing Platform (APP) supporting a GPU-host
	 * XGMI interface can use VRAM through here, as it appears as system
	 * reserved memory in the host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through the PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((!amdgpu_sriov_vf(adev) &&
		(adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):   /* DCE SG support */
		case IP_VERSION(9, 2, 2):   /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
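	/* 8 bytes per GART entry: one 64-bit PTE per GPU page */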
1714 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1715 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1716 				 AMDGPU_PTE_EXECUTABLE;
1717 
1718 	if (!adev->gmc.real_vram_size) {
1719 		dev_info(adev->dev, "Put GART in system memory for APU\n");
1720 		r = amdgpu_gart_table_ram_alloc(adev);
1721 		if (r)
1722 			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1723 	} else {
1724 		r = amdgpu_gart_table_vram_alloc(adev);
1725 		if (r)
1726 			return r;
1727 
1728 		if (adev->gmc.xgmi.connected_to_cpu)
1729 			r = amdgpu_gmc_pdb0_alloc(adev);
1730 	}
1731 
1732 	return r;
1733 }
1734 
1735 /**
1736  * gmc_v9_0_save_registers - saves regs
1737  *
1738  * @adev: amdgpu_device pointer
1739  *
1740  * This saves potential register values that should be
1741  * restored upon resume
1742  */
1743 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1744 {
1745 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1746 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
1747 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1748 }
1749 
1750 static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1751 {
1752 	enum amdgpu_memory_partition mode;
1753 	u32 supp_modes;
1754 	bool valid;
1755 
1756 	mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1757 
1758 	/* Mode detected by hardware not present in supported modes */
1759 	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1760 	    !(BIT(mode - 1) & supp_modes))
1761 		return false;
1762 
1763 	switch (mode) {
1764 	case UNKNOWN_MEMORY_PARTITION_MODE:
1765 	case AMDGPU_NPS1_PARTITION_MODE:
1766 		valid = (adev->gmc.num_mem_partitions == 1);
1767 		break;
1768 	case AMDGPU_NPS2_PARTITION_MODE:
1769 		valid = (adev->gmc.num_mem_partitions == 2);
1770 		break;
1771 	case AMDGPU_NPS4_PARTITION_MODE:
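		/* NPS4 exposes three ranges on APUs and four on dGPUs, see gmc_v9_0_init_sw_mem_ranges() */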
1772 		valid = (adev->gmc.num_mem_partitions == 3 ||
1773 			 adev->gmc.num_mem_partitions == 4);
1774 		break;
1775 	default:
1776 		valid = false;
1777 	}
1778 
1779 	return valid;
1780 }
1781 
1782 static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
1783 {
1784 	int i;
1785 
1786 	/* Check if node with id 'nid' is present in 'node_ids' array */
1787 	for (i = 0; i < num_ids; ++i)
1788 		if (node_ids[i] == nid)
1789 			return true;
1790 
1791 	return false;
1792 }
1793 
1794 static void
1795 gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
1796 			      struct amdgpu_mem_partition_info *mem_ranges)
1797 {
1798 	int num_ranges = 0, ret, mem_groups;
1799 	struct amdgpu_numa_info numa_info;
1800 	int node_ids[MAX_MEM_RANGES];
1801 	int num_xcc, xcc_id;
1802 	uint32_t xcc_mask;
1803 
1804 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1805 	xcc_mask = (1U << num_xcc) - 1;
1806 	mem_groups = hweight32(adev->aid_mask);
1807 
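	/* ask ACPI for each XCC's NUMA affinity and record one range per distinct node */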
1808 	for_each_inst(xcc_id, xcc_mask) {
1809 		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
1810 		if (ret)
1811 			continue;
1812 
1813 		if (numa_info.nid == NUMA_NO_NODE) {
1814 			mem_ranges[0].size = numa_info.size;
1815 			mem_ranges[0].numa.node = numa_info.nid;
1816 			num_ranges = 1;
1817 			break;
1818 		}
1819 
1820 		if (gmc_v9_0_is_node_present(node_ids, num_ranges,
1821 					     numa_info.nid))
1822 			continue;
1823 
1824 		node_ids[num_ranges] = numa_info.nid;
1825 		mem_ranges[num_ranges].numa.node = numa_info.nid;
1826 		mem_ranges[num_ranges].size = numa_info.size;
1827 		++num_ranges;
1828 	}
1829 
1830 	adev->gmc.num_mem_partitions = num_ranges;
1831 
1832 	/* If there is only one partition, don't use the entire size */
1833 	if (adev->gmc.num_mem_partitions == 1)
1834 		mem_ranges[0].size =
1835 			(mem_ranges[0].size * (mem_groups - 1) / mem_groups);
1836 }
1837 
1838 static void
1839 gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1840 			    struct amdgpu_mem_partition_info *mem_ranges)
1841 {
1842 	enum amdgpu_memory_partition mode;
1843 	u32 start_addr = 0, size;
1844 	int i;
1845 
1846 	mode = gmc_v9_0_query_memory_partition(adev);
1847 
1848 	switch (mode) {
1849 	case UNKNOWN_MEMORY_PARTITION_MODE:
1850 	case AMDGPU_NPS1_PARTITION_MODE:
1851 		adev->gmc.num_mem_partitions = 1;
1852 		break;
1853 	case AMDGPU_NPS2_PARTITION_MODE:
1854 		adev->gmc.num_mem_partitions = 2;
1855 		break;
1856 	case AMDGPU_NPS4_PARTITION_MODE:
1857 		if (adev->flags & AMD_IS_APU)
1858 			adev->gmc.num_mem_partitions = 3;
1859 		else
1860 			adev->gmc.num_mem_partitions = 4;
1861 		break;
1862 	default:
1863 		adev->gmc.num_mem_partitions = 1;
1864 		break;
1865 	}
1866 
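	/* split VRAM evenly across the partitions, working in GPU page units */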
1867 	size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) /
1868 	       adev->gmc.num_mem_partitions;
1869 
1870 	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1871 		mem_ranges[i].range.fpfn = start_addr;
1872 		mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1873 		mem_ranges[i].range.lpfn = start_addr + size - 1;
1874 		start_addr += size;
1875 	}
1876 
1877 	/* Extend the last range to absorb any rounding remainder */
1878 	mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
1879 		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1880 	mem_ranges[adev->gmc.num_mem_partitions - 1].size =
1881 		adev->gmc.real_vram_size -
1882 		((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
1883 		 << AMDGPU_GPU_PAGE_SHIFT);
1884 }
1885 
1886 static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
1887 {
1888 	bool valid;
1889 
1890 	adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
1891 					   sizeof(struct amdgpu_mem_partition_info),
1892 					   GFP_KERNEL);
1893 
1894 	if (!adev->gmc.mem_partitions)
1895 		return -ENOMEM;
1896 
1897 	/* TODO: Get the range from PSP/Discovery for dGPU */
1898 	if (adev->gmc.is_app_apu)
1899 		gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1900 	else
1901 		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1902 
1903 	if (amdgpu_sriov_vf(adev))
1904 		valid = true;
1905 	else
1906 		valid = gmc_v9_0_validate_partition_info(adev);
1907 	if (!valid) {
1908 		/* TODO: handle invalid case */
1909 		dev_WARN(adev->dev,
1910 			 "Mem ranges not matching with hardware config");
1911 	}
1912 
1913 	return 0;
1914 }
1915 
1916 static int gmc_v9_0_sw_init(void *handle)
1917 {
1918 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
1919 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1920 	unsigned long inst_mask = adev->aid_mask;
1921 
1922 	adev->gfxhub.funcs->init(adev);
1923 
1924 	adev->mmhub.funcs->init(adev);
1925 
1926 	spin_lock_init(&adev->gmc.invalidate_lock);
1927 
1928 	r = amdgpu_atomfirmware_get_vram_info(adev,
1929 		&vram_width, &vram_type, &vram_vendor);
1930 	if (amdgpu_sriov_vf(adev))
1931 		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on RAVEN,
1932 		 * and the DF related registers are not readable; hardcoding seems
1933 		 * to be the only way to set the correct vram_width.
1934 		 */
1935 		adev->gmc.vram_width = 2048;
1936 	else if (amdgpu_emu_mode != 1)
1937 		adev->gmc.vram_width = vram_width;
1938 
1939 	if (!adev->gmc.vram_width) {
1940 		int chansize, numchan;
1941 
1942 		/* HBM memory channel size */
1943 		if (adev->flags & AMD_IS_APU)
1944 			chansize = 64;
1945 		else
1946 			chansize = 128;
1947 		if (adev->df.funcs &&
1948 		    adev->df.funcs->get_hbm_channel_number) {
1949 			numchan = adev->df.funcs->get_hbm_channel_number(adev);
1950 			adev->gmc.vram_width = numchan * chansize;
1951 		}
1952 	}
1953 
1954 	adev->gmc.vram_type = vram_type;
1955 	adev->gmc.vram_vendor = vram_vendor;
1956 	switch (adev->ip_versions[GC_HWIP][0]) {
1957 	case IP_VERSION(9, 1, 0):
1958 	case IP_VERSION(9, 2, 2):
1959 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1960 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1961 
1962 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1963 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1964 		} else {
1965 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
1966 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1967 			adev->gmc.translate_further =
1968 				adev->vm_manager.num_level > 1;
1969 		}
1970 		break;
1971 	case IP_VERSION(9, 0, 1):
1972 	case IP_VERSION(9, 2, 1):
1973 	case IP_VERSION(9, 4, 0):
1974 	case IP_VERSION(9, 3, 0):
1975 	case IP_VERSION(9, 4, 2):
1976 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1977 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1978 
1979 		/*
1980 		 * To fulfill 4-level page support,
1981 		 * vm size is 256TB (48 bit), the maximum supported by Vega10,
1982 		 * with a block size of 512 (9 bit).
1983 		 */
1984 		/* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
1985 		if (amdgpu_sriov_vf(adev))
1986 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1987 		else
1988 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1989 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
1990 			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1991 		break;
1992 	case IP_VERSION(9, 4, 1):
1993 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1994 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1995 		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
1996 
1997 		/* Keep the vm size same with Vega20 */
1998 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1999 		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
2000 		break;
2001 	case IP_VERSION(9, 4, 3):
2002 		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
2003 				  NUM_XCC(adev->gfx.xcc_mask));
2004 
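		/* one MMHUB0 vmhub per AID: shift the AID mask up into the MMHUB0 bit range */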
2005 		inst_mask <<= AMDGPU_MMHUB0(0);
2006 		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
2007 
2008 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
2009 		break;
2010 	default:
2011 		break;
2012 	}
2013 
2014 	/* This interrupt is for VMC page faults. */
2015 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
2016 				&adev->gmc.vm_fault);
2017 	if (r)
2018 		return r;
2019 
2020 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
2021 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
2022 					&adev->gmc.vm_fault);
2023 		if (r)
2024 			return r;
2025 	}
2026 
2027 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
2028 				&adev->gmc.vm_fault);
2029 
2030 	if (r)
2031 		return r;
2032 
2033 	if (!amdgpu_sriov_vf(adev) &&
2034 	    !adev->gmc.xgmi.connected_to_cpu) {
2035 		/* The ECC error interrupt is sent to the DF. */
2036 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
2037 				      &adev->gmc.ecc_irq);
2038 		if (r)
2039 			return r;
2040 	}
2041 
2042 	/* Set the internal MC address mask
2043 	 * This is the max address of the GPU's
2044 	 * internal address space.
2045 	 */
2046 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
2047 
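	/* GC 9.4.2 and newer can address 48 bits over DMA; older GMC v9 parts are limited to 44 bits */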
2048 	dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48 : 44;
2049 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
2050 	if (r) {
2051 		dev_warn(adev->dev, "No suitable DMA available.\n");
2052 		return r;
2053 	}
2054 	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2055 
2056 	r = gmc_v9_0_mc_init(adev);
2057 	if (r)
2058 		return r;
2059 
2060 	amdgpu_gmc_get_vbios_allocations(adev);
2061 
2062 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
2063 		r = gmc_v9_0_init_mem_ranges(adev);
2064 		if (r)
2065 			return r;
2066 	}
2067 
2068 	/* Memory manager */
2069 	r = amdgpu_bo_init(adev);
2070 	if (r)
2071 		return r;
2072 
2073 	r = gmc_v9_0_gart_init(adev);
2074 	if (r)
2075 		return r;
2076 
2077 	/*
2078 	 * number of VMs
2079 	 * VMID 0 is reserved for System
2080 	 * amdgpu graphics/compute will use VMIDs 1..n-1
2081 	 * amdkfd will use VMIDs n..15
2082 	 *
2083 	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2084 	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2085 	 * for video processing.
2086 	 */
2087 	adev->vm_manager.first_kfd_vmid =
2088 		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
2089 		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
2090 		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;
2091 
2092 	amdgpu_vm_manager_init(adev);
2093 
2094 	gmc_v9_0_save_registers(adev);
2095 
2096 	r = amdgpu_gmc_ras_sw_init(adev);
2097 	if (r)
2098 		return r;
2099 
2100 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
2101 		amdgpu_gmc_sysfs_init(adev);
2102 
2103 	return 0;
2104 }
2105 
2106 static int gmc_v9_0_sw_fini(void *handle)
2107 {
2108 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2109 
2110 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
2111 		amdgpu_gmc_sysfs_fini(adev);
2112 	adev->gmc.num_mem_partitions = 0;
2113 	kfree(adev->gmc.mem_partitions);
2114 
2115 	amdgpu_gmc_ras_fini(adev);
2116 	amdgpu_gem_force_release(adev);
2117 	amdgpu_vm_manager_fini(adev);
2118 	if (!adev->gmc.real_vram_size) {
2119 		dev_info(adev->dev, "Free GART placed in system memory for APU\n");
2120 		amdgpu_gart_table_ram_free(adev);
2121 	} else {
2122 		amdgpu_gart_table_vram_free(adev);
2123 	}
2124 	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2125 	amdgpu_bo_fini(adev);
2126 
2127 	return 0;
2128 }
2129 
2130 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2131 {
2133 	switch (adev->ip_versions[MMHUB_HWIP][0]) {
2134 	case IP_VERSION(9, 0, 0):
2135 		if (amdgpu_sriov_vf(adev))
2136 			break;
2137 		fallthrough;
2138 	case IP_VERSION(9, 4, 0):
2139 		soc15_program_register_sequence(adev,
2140 						golden_settings_mmhub_1_0_0,
2141 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2142 		soc15_program_register_sequence(adev,
2143 						golden_settings_athub_1_0_0,
2144 						ARRAY_SIZE(golden_settings_athub_1_0_0));
2145 		break;
2146 	case IP_VERSION(9, 1, 0):
2147 	case IP_VERSION(9, 2, 0):
2148 		/* TODO for renoir */
2149 		soc15_program_register_sequence(adev,
2150 						golden_settings_athub_1_0_0,
2151 						ARRAY_SIZE(golden_settings_athub_1_0_0));
2152 		break;
2153 	default:
2154 		break;
2155 	}
2156 }
2157 
2158 /**
2159  * gmc_v9_0_restore_registers - restores regs
2160  *
2161  * @adev: amdgpu_device pointer
2162  *
2163  * This restores the register values that were saved at suspend.
2164  */
2165 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2166 {
2167 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
2168 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
2169 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2170 		WARN_ON(adev->gmc.sdpif_register !=
2171 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2172 	}
2173 }
2174 
2175 /**
2176  * gmc_v9_0_gart_enable - gart enable
2177  *
2178  * @adev: amdgpu_device pointer
2179  */
2180 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2181 {
2182 	int r;
2183 
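	/* when xGMI-connected to the CPU, VMID0 maps VRAM and the GART through PDB0 */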
2184 	if (adev->gmc.xgmi.connected_to_cpu)
2185 		amdgpu_gmc_init_pdb0(adev);
2186 
2187 	if (!adev->gart.bo) {
2188 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2189 		return -EINVAL;
2190 	}
2191 
2192 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2193 
2194 	if (!adev->in_s0ix) {
2195 		r = adev->gfxhub.funcs->gart_enable(adev);
2196 		if (r)
2197 			return r;
2198 	}
2199 
2200 	r = adev->mmhub.funcs->gart_enable(adev);
2201 	if (r)
2202 		return r;
2203 
2204 	DRM_INFO("PCIE GART of %uM enabled.\n",
2205 		 (unsigned int)(adev->gmc.gart_size >> 20));
2206 	if (adev->gmc.pdb0_bo)
2207 		DRM_INFO("PDB0 located at 0x%016llX\n",
2208 				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2209 	DRM_INFO("PTB located at 0x%016llX\n",
2210 			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2211 
2212 	return 0;
2213 }
2214 
2215 static int gmc_v9_0_hw_init(void *handle)
2216 {
2217 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2218 	bool value;
2219 	int i, r;
2220 
2221 	/* The sequence of these two function calls matters. */
2222 	gmc_v9_0_init_golden_registers(adev);
2223 
2224 	if (adev->mode_info.num_crtc) {
2225 		/* Lock out access through the VGA aperture */
2226 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2227 		/* disable VGA render */
2228 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2229 	}
2230 
2231 	if (adev->mmhub.funcs->update_power_gating)
2232 		adev->mmhub.funcs->update_power_gating(adev, true);
2233 
2234 	adev->hdp.funcs->init_registers(adev);
2235 
2236 	/* After HDP is initialized, flush HDP. */
2237 	adev->hdp.funcs->flush_hdp(adev, NULL);
2238 
2239 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2240 		value = false;
2241 	else
2242 		value = true;
2243 
2244 	if (!amdgpu_sriov_vf(adev)) {
2245 		if (!adev->in_s0ix)
2246 			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2247 		adev->mmhub.funcs->set_fault_enable_default(adev, value);
2248 	}
2249 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2250 		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2251 			continue;
2252 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2253 	}
2254 
2255 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
2256 		adev->umc.funcs->init_registers(adev);
2257 
2258 	r = gmc_v9_0_gart_enable(adev);
2259 	if (r)
2260 		return r;
2261 
2262 	if (amdgpu_emu_mode == 1)
2263 		return amdgpu_gmc_vram_checking(adev);
2264 
2265 	return r;
2266 }
2267 
2268 /**
2269  * gmc_v9_0_gart_disable - gart disable
2270  *
2271  * @adev: amdgpu_device pointer
2272  *
2273  * This disables all VM page table.
2274  */
2275 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2276 {
2277 	if (!adev->in_s0ix)
2278 		adev->gfxhub.funcs->gart_disable(adev);
2279 	adev->mmhub.funcs->gart_disable(adev);
2280 }
2281 
2282 static int gmc_v9_0_hw_fini(void *handle)
2283 {
2284 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2285 
2286 	gmc_v9_0_gart_disable(adev);
2287 
2288 	if (amdgpu_sriov_vf(adev)) {
2289 		/* full access mode, so don't touch any GMC register */
2290 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
2291 		return 0;
2292 	}
2293 
2294 	/*
2295 	 * Pair the operations did in gmc_v9_0_hw_init and thus maintain
2296 	 * a correct cached state for GMC. Otherwise, the "gate" again
2297 	 * operation on S3 resuming will fail due to wrong cached state.
2298 	 */
2299 	if (adev->mmhub.funcs->update_power_gating)
2300 		adev->mmhub.funcs->update_power_gating(adev, false);
2301 
2302 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2303 
2304 	return 0;
2305 }
2306 
2307 static int gmc_v9_0_suspend(void *handle)
2308 {
2309 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2310 
2311 	return gmc_v9_0_hw_fini(adev);
2312 }
2313 
2314 static int gmc_v9_0_resume(void *handle)
2315 {
2316 	int r;
2317 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2318 
2319 	r = gmc_v9_0_hw_init(adev);
2320 	if (r)
2321 		return r;
2322 
2323 	amdgpu_vmid_reset_all(adev);
2324 
2325 	return 0;
2326 }
2327 
2328 static bool gmc_v9_0_is_idle(void *handle)
2329 {
2330 	/* MC is always ready in GMC v9. */
2331 	return true;
2332 }
2333 
2334 static int gmc_v9_0_wait_for_idle(void *handle)
2335 {
2336 	/* There is no need to wait for MC idle in GMC v9. */
2337 	return 0;
2338 }
2339 
2340 static int gmc_v9_0_soft_reset(void *handle)
2341 {
2342 	/* XXX for emulation. */
2343 	return 0;
2344 }
2345 
2346 static int gmc_v9_0_set_clockgating_state(void *handle,
2347 					enum amd_clockgating_state state)
2348 {
2349 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2350 
2351 	adev->mmhub.funcs->set_clockgating(adev, state);
2352 
2353 	athub_v1_0_set_clockgating(adev, state);
2354 
2355 	return 0;
2356 }
2357 
2358 static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
2359 {
2360 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2361 
2362 	adev->mmhub.funcs->get_clockgating(adev, flags);
2363 
2364 	athub_v1_0_get_clockgating(adev, flags);
2365 }
2366 
2367 static int gmc_v9_0_set_powergating_state(void *handle,
2368 					enum amd_powergating_state state)
2369 {
2370 	return 0;
2371 }
2372 
2373 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2374 	.name = "gmc_v9_0",
2375 	.early_init = gmc_v9_0_early_init,
2376 	.late_init = gmc_v9_0_late_init,
2377 	.sw_init = gmc_v9_0_sw_init,
2378 	.sw_fini = gmc_v9_0_sw_fini,
2379 	.hw_init = gmc_v9_0_hw_init,
2380 	.hw_fini = gmc_v9_0_hw_fini,
2381 	.suspend = gmc_v9_0_suspend,
2382 	.resume = gmc_v9_0_resume,
2383 	.is_idle = gmc_v9_0_is_idle,
2384 	.wait_for_idle = gmc_v9_0_wait_for_idle,
2385 	.soft_reset = gmc_v9_0_soft_reset,
2386 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
2387 	.set_powergating_state = gmc_v9_0_set_powergating_state,
2388 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
2389 };
2390 
2391 const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
2392 {
2393 	.type = AMD_IP_BLOCK_TYPE_GMC,
2394 	.major = 9,
2395 	.minor = 0,
2396 	.rev = 0,
2397 	.funcs = &gmc_v9_0_ip_funcs,
2398 };
2399