/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* Define these here since we already include the DCE12 headers and these registers are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2

static const char *gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

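/*
 * The MMHUB client ID tables below are indexed as [cid][rw]: the first
 * index is the client ID reported in VM_L2_PROTECTION_FAULT_STATUS and
 * the second selects the read (0) or write (1) client name, matching
 * the mmhub_client_ids_<asic>[cid][rw] lookups in
 * gmc_v9_0_process_interrupt().
 */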
static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

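/*
 * The UMC ECC control registers below follow a regular pattern: eight
 * UMC instances spaced 0x40000 apart, each with four channels spaced
 * 0x800 apart. The mask registers sit at a fixed +0x20 offset from the
 * corresponding control registers.
 */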
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

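	/* Presumably the low seven bits of each control register below are
	 * the ECC interrupt enables; this mask sets or clears them together.
	 */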
	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
			return 1;
	}

	if (!printk_ratelimit())
		return 0;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
	    (adev->asic_type < CHIP_ALDEBARAN))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case CHIP_VEGA12:
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case CHIP_VEGA20:
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case CHIP_ARCTURUS:
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case CHIP_RAVEN:
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case CHIP_RENOIR:
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		case CHIP_ALDEBARAN:
			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

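/*
 * Build a VM_INVALIDATE_ENG0_REQ value that invalidates the L2 PTEs,
 * all three PDE levels and the L1 PTEs for the given VMID, without
 * clearing the protection fault status so it can still be inspected.
 */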
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - decide whether to use a semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
				       uint32_t vmhub)
{
	if (adev->asic_type == CHIP_ALDEBARAN)
		return false;

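	/*
	 * Use the semaphore only for MMHUB invalidations on bare metal,
	 * and skip it on Picasso (an APU with the PICASSO flag but not
	 * the RAVEN2 flag); that is what the double negation below
	 * encodes.
	 */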
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 maps the physical GPU addresses used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
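	/*
	 * Engine 17 is assumed to be reserved for these kernel-initiated
	 * flushes; rings flush through the engines handed out by
	 * amdgpu_gmc_allocate_vm_inv_eng() instead.
	 */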
	const unsigned eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->asic_type == CHIP_VEGA20) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_sem);
		return;
	}

	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating off cycle. To work around this, acquire a semaphore
	 * before the invalidation and release it afterwards, so we never
	 * enter the power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	do {
		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
			      hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB_0) &&
		    (adev->asic_type < CHIP_ALDEBARAN))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->asic_type == CHIP_VEGA20);
		/* 2 dwords flush + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		spin_lock(&adev->gfx.kiq.ring_lock);
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			up_read(&adev->reset_sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_sem);
			return -ETIME;
		}
		up_read(&adev->reset_sem);
		return 0;
	}

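	/*
	 * No usable KIQ ring: fall back to scanning the ATC VMID-PASID
	 * mappings and flushing every VMID that currently holds this
	 * PASID directly through the invalidation engine.
	 */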
	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating off cycle. To work around this, acquire a semaphore
	 * before the invalidation and release it afterwards, so we never
	 * enter the power-gated state in between.
	 */

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore path still needs debugging before it can be used for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

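/*
 * As an illustrative example of the PTE layout above: a valid, snooped,
 * readable and writable 4K system page would set bit 0 (valid), bit 1
 * (system), bit 2 (snooped) and bits 5-6 (read/write), carry the
 * physical address in bits 47:12, and encode the mtype in bits 58:57.
 */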
static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_RW:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
	}
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
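		/* A BFS of 9 selects 2^9 contiguous 4K pages, i.e. 2MB blocks. */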
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((adev->asic_type == CHIP_ARCTURUS ||
	    adev->asic_type == CHIP_ALDEBARAN) &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;

	if (adev->asic_type == CHIP_ALDEBARAN)
		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
		break;
	case CHIP_ARCTURUS:
		adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
		break;
	case CHIP_ALDEBARAN:
		adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->asic_type == CHIP_ALDEBARAN) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Workaround for the performance drop seen when the VBIOS enables
	 * partial writes while disabling HBM ECC on Vega10.
	 */
	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
		if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->reset_ras_error_count)
		adev->mmhub.ras_funcs->reset_ras_error_count(adev);

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * An AMD Accelerated Processing Platform (APP) supporting the
	 * GPU-HOST xgmi interface can use VRAM through here, as it appears
	 * as system reserved memory in the host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

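	/*
	 * When VRAM is visible to the CPU over XGMI, VMID0 uses a deeper
	 * page table rooted in PDB0 (allocated at the end of this
	 * function) so system memory and VRAM can be mapped through the
	 * same GART; otherwise a flat single-level table is enough.
	 */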
	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	r = amdgpu_gart_table_vram_alloc(adev);
	if (r)
		return r;

	if (adev->gmc.xgmi.connected_to_cpu)
		r = amdgpu_gmc_pdb0_alloc(adev);

	return r;
}

/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as it
		 * can on RAVEN, and the DF-related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
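		/* e.g. on Vega10, 16 channels x 128 bits should yield the
		 * same 2048-bit width hardcoded for SR-IOV above.
		 */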
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		adev->num_vmhubs = 2;

		/*
		 * To support 4-level page tables, the VM size is 256TB
		 * (48 bits), the maximum for Vega10, with a block size
		 * of 512 (9 bits).
		 */
		/* SR-IOV restricts max_pfn to below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is for VMC page faults. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(adev->asic_type == CHIP_ARCTURUS ||
		 adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_RAVEN) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};