xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 2ee9403e)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
535b6b35aaSHawking Zhang #include "umc_v6_1.h"
54e7da754bSMonk Liu #include "umc_v6_0.h"
55e60f8db5SAlex Xie 
5644a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5744a99b65SAndrey Grodzovsky 
58791c4769Sxinhui pan #include "amdgpu_ras.h"
59029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
60791c4769Sxinhui pan 
61ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
62ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
63ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
64ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
65ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
66ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
67ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
68ebdef28eSAlex Deucher 
69e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
70e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
71e60f8db5SAlex Xie 
/*
 * HDP golden register settings for Vega10.
 * NOTE(review): entries appear to be { register offset, AND mask, OR value }
 * triples as used by amdgpu golden-register programming -- confirm against
 * the consumer before relying on this layout.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
85e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register overrides (reg, AND mask, OR value). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
915c583018SEvan Quan 
/* ATHUB 1.0.0 golden register overrides (reg, AND mask, OR value). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
975c583018SEvan Quan 
/*
 * Absolute offsets of the UMC MCUMC control registers used to toggle ECC
 * error interrupts: eight register apertures spaced 0x40000 apart, each
 * with four instances spaced 0x800 apart (32 registers total).
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
13202bab923SDavid Panariti 
/*
 * Mask-register counterparts of ecc_umc_mcumc_ctrl_addrs: identical layout,
 * each offset is the corresponding control register plus 0x20.
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16702bab923SDavid Panariti 
/*
 * Status-register counterparts of ecc_umc_mcumc_ctrl_addrs: identical layout,
 * each offset is the corresponding control register plus 0x2.
 * NOTE(review): not referenced in this chunk -- presumably consumed by the
 * ECC query/IRQ path elsewhere in the file.
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
20202bab923SDavid Panariti 
203791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
204791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
205791c4769Sxinhui pan 		unsigned type,
206791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
207791c4769Sxinhui pan {
208791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
209791c4769Sxinhui pan 
2101e2c6d55SJohn Clements 	/* Devices newer then VEGA10/12 shall have these programming
2111e2c6d55SJohn Clements 	     sequences performed by PSP BL */
2121e2c6d55SJohn Clements 	if (adev->asic_type >= CHIP_VEGA20)
2131e2c6d55SJohn Clements 		return 0;
2141e2c6d55SJohn Clements 
215791c4769Sxinhui pan 	bits = 0x7f;
216791c4769Sxinhui pan 
217791c4769Sxinhui pan 	switch (state) {
218791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
219791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
220791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
221791c4769Sxinhui pan 			tmp = RREG32(reg);
222791c4769Sxinhui pan 			tmp &= ~bits;
223791c4769Sxinhui pan 			WREG32(reg, tmp);
224791c4769Sxinhui pan 		}
225791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
226791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
227791c4769Sxinhui pan 			tmp = RREG32(reg);
228791c4769Sxinhui pan 			tmp &= ~bits;
229791c4769Sxinhui pan 			WREG32(reg, tmp);
230791c4769Sxinhui pan 		}
231791c4769Sxinhui pan 		break;
232791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
233791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
234791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
235791c4769Sxinhui pan 			tmp = RREG32(reg);
236791c4769Sxinhui pan 			tmp |= bits;
237791c4769Sxinhui pan 			WREG32(reg, tmp);
238791c4769Sxinhui pan 		}
239791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
240791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
241791c4769Sxinhui pan 			tmp = RREG32(reg);
242791c4769Sxinhui pan 			tmp |= bits;
243791c4769Sxinhui pan 			WREG32(reg, tmp);
244791c4769Sxinhui pan 		}
245791c4769Sxinhui pan 		break;
246791c4769Sxinhui pan 	default:
247791c4769Sxinhui pan 		break;
248791c4769Sxinhui pan 	}
249791c4769Sxinhui pan 
250791c4769Sxinhui pan 	return 0;
251791c4769Sxinhui pan }
252791c4769Sxinhui pan 
253e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
254e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
255e60f8db5SAlex Xie 					unsigned type,
256e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
257e60f8db5SAlex Xie {
258e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
259ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
260e60f8db5SAlex Xie 
26111250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26211250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26311250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26411250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26511250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26611250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26711250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
26811250164SChristian König 
269e60f8db5SAlex Xie 	switch (state) {
270e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
2711daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
272ae6d1416STom St Denis 			hub = &adev->vmhub[j];
273e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
274e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
275e60f8db5SAlex Xie 				tmp = RREG32(reg);
276e60f8db5SAlex Xie 				tmp &= ~bits;
277e60f8db5SAlex Xie 				WREG32(reg, tmp);
278e60f8db5SAlex Xie 			}
279e60f8db5SAlex Xie 		}
280e60f8db5SAlex Xie 		break;
281e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
2821daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
283ae6d1416STom St Denis 			hub = &adev->vmhub[j];
284e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
285e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
286e60f8db5SAlex Xie 				tmp = RREG32(reg);
287e60f8db5SAlex Xie 				tmp |= bits;
288e60f8db5SAlex Xie 				WREG32(reg, tmp);
289e60f8db5SAlex Xie 			}
290e60f8db5SAlex Xie 		}
291e60f8db5SAlex Xie 	default:
292e60f8db5SAlex Xie 		break;
293e60f8db5SAlex Xie 	}
294e60f8db5SAlex Xie 
295e60f8db5SAlex Xie 	return 0;
296e60f8db5SAlex Xie }
297e60f8db5SAlex Xie 
/*
 * Handle a VM protection-fault interrupt.
 *
 * Decodes the faulting address from the IV entry, filters duplicate retry
 * faults, optionally lets the VM code resolve a retry fault, then latches
 * and logs the L2 protection-fault status (bare metal only).
 *
 * Returns 1 when the fault was filtered/handled here (which also prevents
 * forwarding it to KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* Bit 7 of src_data[1] flags a retry (recoverable) fault. */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	/* Faulting page address: 48 bits split across the two data words. */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Drop retry faults we have already seen for this address/pasid. */
	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* Map the IH client id to the hub that raised the fault. */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		/* Read then clear (bit 0) the latched fault status. */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* Rate-limited diagnostic dump including the owning task. */
	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));

		}
	}

	return 0;
}
384e60f8db5SAlex Xie 
/* IRQ source callbacks for VM protection faults. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
389e60f8db5SAlex Xie 

/* IRQ source callbacks for UMC ECC errors; processing is the common helper. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
395791c4769Sxinhui pan 
396e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
397e60f8db5SAlex Xie {
398770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
399770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
400791c4769Sxinhui pan 
4012ee9403eSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
402791c4769Sxinhui pan 		adev->gmc.ecc_irq.num_types = 1;
403791c4769Sxinhui pan 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
404e60f8db5SAlex Xie 	}
4052ee9403eSZhigang Luo }
406e60f8db5SAlex Xie 
4072a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4082a79d868SYong Zhao 					uint32_t flush_type)
40903f89febSChristian König {
41003f89febSChristian König 	u32 req = 0;
41103f89febSChristian König 
41203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
413c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4142a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
41503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
41603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
41703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
41803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
41903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
42003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
42103f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
42203f89febSChristian König 
42303f89febSChristian König 	return req;
42403f89febSChristian König }
42503f89febSChristian König 
42690f6452cSchangzhu /**
42790f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
42890f6452cSchangzhu  *
42990f6452cSchangzhu  * @adev: amdgpu_device pointer
43090f6452cSchangzhu  * @vmhub: vmhub type
43190f6452cSchangzhu  *
43290f6452cSchangzhu  */
43390f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
43490f6452cSchangzhu 				       uint32_t vmhub)
43590f6452cSchangzhu {
43690f6452cSchangzhu 	return ((vmhub == AMDGPU_MMHUB_0 ||
43790f6452cSchangzhu 		 vmhub == AMDGPU_MMHUB_1) &&
43890f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
43990f6452cSchangzhu 		(!(adev->asic_type == CHIP_RAVEN &&
44090f6452cSchangzhu 		   adev->rev_id < 0x8 &&
44190f6452cSchangzhu 		   adev->pdev->device == 0x15d8)));
44290f6452cSchangzhu }
44390f6452cSchangzhu 
444e60f8db5SAlex Xie /*
445e60f8db5SAlex Xie  * GART
446e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
447e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
448e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
449e60f8db5SAlex Xie  */
450e60f8db5SAlex Xie 
451e60f8db5SAlex Xie /**
4522a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
453e60f8db5SAlex Xie  *
454e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
455e60f8db5SAlex Xie  * @vmid: vm instance to flush
4562a79d868SYong Zhao  * @flush_type: the flush type
457e60f8db5SAlex Xie  *
4582a79d868SYong Zhao  * Flush the TLB for the requested page table using certain type.
459e60f8db5SAlex Xie  */
4603ff98548SOak Zeng static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
4613ff98548SOak Zeng 					uint32_t vmhub, uint32_t flush_type)
462e60f8db5SAlex Xie {
46390f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
464e60f8db5SAlex Xie 	const unsigned eng = 17;
4653ff98548SOak Zeng 	u32 j, tmp;
4663ff98548SOak Zeng 	struct amdgpu_vmhub *hub;
467e60f8db5SAlex Xie 
4683ff98548SOak Zeng 	BUG_ON(vmhub >= adev->num_vmhubs);
4693ff98548SOak Zeng 
4703ff98548SOak Zeng 	hub = &adev->vmhub[vmhub];
4713ff98548SOak Zeng 	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
472e60f8db5SAlex Xie 
47382d1a1b1SChengming Gui 	/* This is necessary for a HW workaround under SRIOV as well
47482d1a1b1SChengming Gui 	 * as GFXOFF under bare metal
47582d1a1b1SChengming Gui 	 */
47682d1a1b1SChengming Gui 	if (adev->gfx.kiq.ring.sched.ready &&
47782d1a1b1SChengming Gui 			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
47882d1a1b1SChengming Gui 			!adev->in_gpu_reset) {
479af5fe1e9SChristian König 		uint32_t req = hub->vm_inv_eng0_req + eng;
480af5fe1e9SChristian König 		uint32_t ack = hub->vm_inv_eng0_ack + eng;
481af5fe1e9SChristian König 
482af5fe1e9SChristian König 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
483af5fe1e9SChristian König 				1 << vmid);
4843ff98548SOak Zeng 		return;
485fc0faf04SEmily Deng 	}
4863890d111SEmily Deng 
4873890d111SEmily Deng 	spin_lock(&adev->gmc.invalidate_lock);
488f920d1bbSchangzhu 
489f920d1bbSchangzhu 	/*
490f920d1bbSchangzhu 	 * It may lose gpuvm invalidate acknowldege state across power-gating
491f920d1bbSchangzhu 	 * off cycle, add semaphore acquire before invalidation and semaphore
492f920d1bbSchangzhu 	 * release after invalidation to avoid entering power gated state
493f920d1bbSchangzhu 	 * to WA the Issue
494f920d1bbSchangzhu 	 */
495f920d1bbSchangzhu 
496f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
49790f6452cSchangzhu 	if (use_semaphore) {
498f920d1bbSchangzhu 		for (j = 0; j < adev->usec_timeout; j++) {
499f920d1bbSchangzhu 			/* a read return value of 1 means semaphore acuqire */
500f920d1bbSchangzhu 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
501f920d1bbSchangzhu 			if (tmp & 0x1)
502f920d1bbSchangzhu 				break;
503f920d1bbSchangzhu 			udelay(1);
504f920d1bbSchangzhu 		}
505f920d1bbSchangzhu 
506f920d1bbSchangzhu 		if (j >= adev->usec_timeout)
507f920d1bbSchangzhu 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
508f920d1bbSchangzhu 	}
509f920d1bbSchangzhu 
510c7a7266bSXiangliang Yu 	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
51153499173SXiaojie Yuan 
51253499173SXiaojie Yuan 	/*
51353499173SXiaojie Yuan 	 * Issue a dummy read to wait for the ACK register to be cleared
51453499173SXiaojie Yuan 	 * to avoid a false ACK due to the new fast GRBM interface.
51553499173SXiaojie Yuan 	 */
51653499173SXiaojie Yuan 	if (vmhub == AMDGPU_GFXHUB_0)
51753499173SXiaojie Yuan 		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
51853499173SXiaojie Yuan 
519e60f8db5SAlex Xie 	for (j = 0; j < adev->usec_timeout; j++) {
520c7a7266bSXiangliang Yu 		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
521396557b0SChristian König 		if (tmp & (1 << vmid))
522e60f8db5SAlex Xie 			break;
523e60f8db5SAlex Xie 		udelay(1);
524e60f8db5SAlex Xie 	}
525f920d1bbSchangzhu 
526f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
52790f6452cSchangzhu 	if (use_semaphore)
528f920d1bbSchangzhu 		/*
529f920d1bbSchangzhu 		 * add semaphore release after invalidation,
530f920d1bbSchangzhu 		 * write with 0 means semaphore release
531f920d1bbSchangzhu 		 */
532f920d1bbSchangzhu 		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
533f920d1bbSchangzhu 
5343890d111SEmily Deng 	spin_unlock(&adev->gmc.invalidate_lock);
535f920d1bbSchangzhu 
536396557b0SChristian König 	if (j < adev->usec_timeout)
5373ff98548SOak Zeng 		return;
538396557b0SChristian König 
539e60f8db5SAlex Xie 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
540e60f8db5SAlex Xie }
541e60f8db5SAlex Xie 
/*
 * Emit a TLB flush (flush type 0) for @vmid on @ring's hub: program the
 * context-0 page-table base for the vmid, then issue the invalidate request
 * and wait for its ack, bracketed by the invalidate-engine semaphore when
 * required.  Returns @pd_addr unchanged.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	/* Per-vmid PTB address registers are 64-bit, hence the 2 * vmid stride. */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* Kick the invalidation and wait for the per-vmid ack bit. */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}
5849096d6e5SChristian König 
585c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
586c633c00bSChristian König 					unsigned pasid)
587c633c00bSChristian König {
588c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
589c633c00bSChristian König 	uint32_t reg;
590c633c00bSChristian König 
591f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
592f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
593f2d66571SLe Ma 		return;
594f2d66571SLe Ma 
595a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
596c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
597c633c00bSChristian König 	else
598c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
599c633c00bSChristian König 
600c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
601c633c00bSChristian König }
602c633c00bSChristian König 
603e60f8db5SAlex Xie /*
604e60f8db5SAlex Xie  * PTE format on VEGA 10:
605e60f8db5SAlex Xie  * 63:59 reserved
606e60f8db5SAlex Xie  * 58:57 mtype
607e60f8db5SAlex Xie  * 56 F
608e60f8db5SAlex Xie  * 55 L
609e60f8db5SAlex Xie  * 54 P
610e60f8db5SAlex Xie  * 53 SW
611e60f8db5SAlex Xie  * 52 T
612e60f8db5SAlex Xie  * 50:48 reserved
613e60f8db5SAlex Xie  * 47:12 4k physical page base address
614e60f8db5SAlex Xie  * 11:7 fragment
615e60f8db5SAlex Xie  * 6 write
616e60f8db5SAlex Xie  * 5 read
617e60f8db5SAlex Xie  * 4 exe
618e60f8db5SAlex Xie  * 3 Z
619e60f8db5SAlex Xie  * 2 snooped
620e60f8db5SAlex Xie  * 1 system
621e60f8db5SAlex Xie  * 0 valid
622e60f8db5SAlex Xie  *
623e60f8db5SAlex Xie  * PDE format on VEGA 10:
624e60f8db5SAlex Xie  * 63:59 block fragment size
625e60f8db5SAlex Xie  * 58:55 reserved
626e60f8db5SAlex Xie  * 54 P
627e60f8db5SAlex Xie  * 53:48 reserved
628e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
629e60f8db5SAlex Xie  * 5:3 reserved
630e60f8db5SAlex Xie  * 2 C
631e60f8db5SAlex Xie  * 1 system
632e60f8db5SAlex Xie  * 0 valid
633e60f8db5SAlex Xie  */
634e60f8db5SAlex Xie 
63571776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
636e60f8db5SAlex Xie 
637e60f8db5SAlex Xie {
63871776b6dSChristian König 	switch (flags) {
639e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
64071776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
641e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
64271776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
643e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
64471776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
645093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
64671776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
647e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
64871776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
649e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
65071776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
651e60f8db5SAlex Xie 	default:
65271776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
653e60f8db5SAlex Xie 	}
654e60f8db5SAlex Xie }
655e60f8db5SAlex Xie 
/*
 * gmc_v9_0_get_vm_pde - massage a PDE address and its flags for the hardware
 *
 * Relocates non-system, non-PTE addresses into the MC address space and,
 * when 5-level translation is in use, applies the level-specific PDE bits.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* VRAM-backed page directories/tables need the MC base offset applied. */
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* Address must be 64-byte aligned and fit in 48 bits (the mask
	 * covers bits 63:48 and 5:0). */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	/* The remaining adjustments only apply with translate-further mode. */
	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* At PDB0 either clear the PTE-in-PDE marker or request
		 * further translation via the TF bit. */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
679f75e237cSChristian König 
680cbfae36cSChristian König static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
681cbfae36cSChristian König 				struct amdgpu_bo_va_mapping *mapping,
682cbfae36cSChristian König 				uint64_t *flags)
683cbfae36cSChristian König {
684cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
685cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
686cbfae36cSChristian König 
687cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
688cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
689cbfae36cSChristian König 
690cbfae36cSChristian König 	if (mapping->flags & AMDGPU_PTE_PRT) {
691cbfae36cSChristian König 		*flags |= AMDGPU_PTE_PRT;
692cbfae36cSChristian König 		*flags &= ~AMDGPU_PTE_VALID;
693cbfae36cSChristian König 	}
694cbfae36cSChristian König 
695cbfae36cSChristian König 	if (adev->asic_type == CHIP_ARCTURUS &&
696cbfae36cSChristian König 	    !(*flags & AMDGPU_PTE_SYSTEM) &&
697cbfae36cSChristian König 	    mapping->bo_va->is_xgmi)
698cbfae36cSChristian König 		*flags |= AMDGPU_PTE_SNOOPED;
699cbfae36cSChristian König }
700cbfae36cSChristian König 
/* GMC callback table shared by all GMC v9 ASICs. */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};
709e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
714e60f8db5SAlex Xie 
/* Select the UMC (unified memory controller) callbacks and RAS channel
 * topology per ASIC; ASICs without UMC support are left untouched. */
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		/* VEGA20 uses UMC 6.1 with the VG20 per-channel offset. */
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	case CHIP_ARCTURUS:
		/* Same UMC 6.1 block as VEGA20, but the Arcturus offset. */
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}
7415b6b35aaSHawking Zhang 
7423d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
7433d093da0STao Zhou {
7443d093da0STao Zhou 	switch (adev->asic_type) {
7453d093da0STao Zhou 	case CHIP_VEGA20:
746d65bf1f8STao Zhou 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
7473d093da0STao Zhou 		break;
748f6c3623bSDennis Li 	case CHIP_ARCTURUS:
749f6c3623bSDennis Li 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
750f6c3623bSDennis Li 		break;
7513d093da0STao Zhou 	default:
7523d093da0STao Zhou 		break;
7533d093da0STao Zhou 	}
7543d093da0STao Zhou }
7553d093da0STao Zhou 
756e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
757e60f8db5SAlex Xie {
758e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
759e60f8db5SAlex Xie 
760132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
761e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
7625b6b35aaSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
7633d093da0STao Zhou 	gmc_v9_0_set_mmhub_funcs(adev);
764e60f8db5SAlex Xie 
765770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
766770d13b1SChristian König 	adev->gmc.shared_aperture_end =
767770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
768bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
769770d13b1SChristian König 	adev->gmc.private_aperture_end =
770770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
771a7ea6548SAlex Deucher 
772e60f8db5SAlex Xie 	return 0;
773e60f8db5SAlex Xie }
774e60f8db5SAlex Xie 
/* Return true when the VBIOS stolen-memory reservation must be kept for
 * this ASIC instead of being released after init. */
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
799cd2b5623SAlex Deucher 
800c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
801c713a461SEvan Quan {
802c713a461SEvan Quan 	struct amdgpu_ring *ring;
803c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
804c8a6e2a3SLe Ma 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
805c8a6e2a3SLe Ma 		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
806c713a461SEvan Quan 	unsigned i;
807c713a461SEvan Quan 	unsigned vmhub, inv_eng;
808c713a461SEvan Quan 
809c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
810c713a461SEvan Quan 		ring = adev->rings[i];
811c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
812c713a461SEvan Quan 
813c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
814c713a461SEvan Quan 		if (!inv_eng) {
815c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
816c713a461SEvan Quan 				ring->name);
817c713a461SEvan Quan 			return -EINVAL;
818c713a461SEvan Quan 		}
819c713a461SEvan Quan 
820c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
82172464382SChristian König 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
822c713a461SEvan Quan 
823c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
824c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
825c713a461SEvan Quan 	}
826c713a461SEvan Quan 
827c713a461SEvan Quan 	return 0;
828c713a461SEvan Quan }
829c713a461SEvan Quan 
/* Late init: release stolen memory where allowed, allocate invalidation
 * engines, probe ECC support, set up RAS and enable the VM fault irq. */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Free the stolen VGA reservation unless this ASIC must keep it. */
	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* Query VRAM ECC support from atomfirmware. */
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				/* Without ECC, forced parity write RMW in DF
				 * can be turned off. */
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
874e60f8db5SAlex Xie 
/* Lay out VRAM, GART and AGP ranges in the MC address space and record
 * the VRAM base offset used for page-table address translation. */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	/* The FB base comes from the MMHUB, except for SR-IOV guests
	 * (non-Arcturus) where it stays 0. */
	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
897e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* dGPU only: try to resize BAR0 to cover all of VRAM. */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* APUs carve VRAM out of system memory; address it directly. */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* Honor the module parameter override (in MB). */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
958e60f8db5SAlex Xie 
/* One-time GART setup: init the common gart structure and allocate the
 * page table in VRAM with uncached, executable PTE defaults. */
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	/* 8 bytes per GART page table entry */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
976e60f8db5SAlex Xie 
/* Return the size in bytes of the pre-OS (VBIOS) framebuffer that must
 * be reserved, or 0 when it would consume nearly all of VRAM. */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		/* Otherwise derive the size from the active display
		 * viewport, assuming 4 bytes per pixel. */
		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
1022ebdef28eSAlex Deucher 
/* Software init: set up the hubs, VRAM parameters, VM size per ASIC,
 * interrupt sources, DMA mask, MC layout, GART and the VM manager. */
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	/* NOTE(review): the return value of this query is deliberately not
	 * checked; the fallbacks below cover a zero vram_width. */
	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers are not readable, so hardcoding is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	/* Fallback: derive width from HBM channel count * channel size. */
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus has a second VMC client for its extra MM hub. */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1178e60f8db5SAlex Xie 
/* Software fini: tear down RAS, GEM objects, VM manager, stolen memory
 * (where it was kept) and the GART structures, in that order. */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* Only ASICs that kept the stolen reservation still hold the BO. */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1197e60f8db5SAlex Xie 
/* Program the per-ASIC golden register sequences for MMHUB and ATHUB. */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		/* NOTE(review): SR-IOV VFs skip this — presumably the host
		 * programs the golden settings; confirm against hypervisor. */
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1226e60f8db5SAlex Xie 
1227e60f8db5SAlex Xie /**
1228e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1229e60f8db5SAlex Xie  *
1230e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1231e60f8db5SAlex Xie  */
1232e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1233e60f8db5SAlex Xie {
1234cb1545f7SOak Zeng 	int r;
1235e60f8db5SAlex Xie 
12361123b989SChristian König 	if (adev->gart.bo == NULL) {
1237e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1238e60f8db5SAlex Xie 		return -EINVAL;
1239e60f8db5SAlex Xie 	}
1240ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1241ce1b1b66SMonk Liu 	if (r)
1242ce1b1b66SMonk Liu 		return r;
1243e60f8db5SAlex Xie 
1244e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
1245e60f8db5SAlex Xie 	if (r)
1246e60f8db5SAlex Xie 		return r;
1247e60f8db5SAlex Xie 
124851cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
124951cce480SLe Ma 		r = mmhub_v9_4_gart_enable(adev);
125051cce480SLe Ma 	else
1251e60f8db5SAlex Xie 		r = mmhub_v1_0_gart_enable(adev);
1252e60f8db5SAlex Xie 	if (r)
1253e60f8db5SAlex Xie 		return r;
1254e60f8db5SAlex Xie 
1255cb1545f7SOak Zeng 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1256cb1545f7SOak Zeng 		 (unsigned)(adev->gmc.gart_size >> 20),
1257cb1545f7SOak Zeng 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1258cb1545f7SOak Zeng 	adev->gart.ready = true;
1259cb1545f7SOak Zeng 	return 0;
1260cb1545f7SOak Zeng }
1261cb1545f7SOak Zeng 
1262cb1545f7SOak Zeng static int gmc_v9_0_hw_init(void *handle)
1263cb1545f7SOak Zeng {
1264cb1545f7SOak Zeng 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1265cb1545f7SOak Zeng 	bool value;
1266cb1545f7SOak Zeng 	int r, i;
1267cb1545f7SOak Zeng 	u32 tmp;
1268cb1545f7SOak Zeng 
1269cb1545f7SOak Zeng 	/* The sequence of these two function calls matters.*/
1270cb1545f7SOak Zeng 	gmc_v9_0_init_golden_registers(adev);
1271cb1545f7SOak Zeng 
1272cb1545f7SOak Zeng 	if (adev->mode_info.num_crtc) {
1273cb1545f7SOak Zeng 		if (adev->asic_type != CHIP_ARCTURUS) {
1274cb1545f7SOak Zeng 			/* Lockout access through VGA aperture*/
1275cb1545f7SOak Zeng 			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1276cb1545f7SOak Zeng 
1277cb1545f7SOak Zeng 			/* disable VGA render */
1278cb1545f7SOak Zeng 			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1279cb1545f7SOak Zeng 		}
1280cb1545f7SOak Zeng 	}
1281cb1545f7SOak Zeng 
1282cb1545f7SOak Zeng 	amdgpu_device_program_register_sequence(adev,
1283cb1545f7SOak Zeng 						golden_settings_vega10_hdp,
1284cb1545f7SOak Zeng 						ARRAY_SIZE(golden_settings_vega10_hdp));
1285cb1545f7SOak Zeng 
1286cb1545f7SOak Zeng 	switch (adev->asic_type) {
1287cb1545f7SOak Zeng 	case CHIP_RAVEN:
1288cb1545f7SOak Zeng 		/* TODO for renoir */
1289cb1545f7SOak Zeng 		mmhub_v1_0_update_power_gating(adev, true);
1290cb1545f7SOak Zeng 		break;
1291f81b86a0SOak Zeng 	case CHIP_ARCTURUS:
1292f81b86a0SOak Zeng 		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
1293f81b86a0SOak Zeng 		break;
1294cb1545f7SOak Zeng 	default:
1295cb1545f7SOak Zeng 		break;
1296cb1545f7SOak Zeng 	}
1297cb1545f7SOak Zeng 
1298846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1299e60f8db5SAlex Xie 
1300b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1301b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1302e60f8db5SAlex Xie 
1303fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1304fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1305fe2b5323STiecheng Zhou 
13061d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP.*/
1307bebc0762SHawking Zhang 	adev->nbio.funcs->hdp_flush(adev, NULL);
13081d4e0a8cSMonk Liu 
1309e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1310e60f8db5SAlex Xie 		value = false;
1311e60f8db5SAlex Xie 	else
1312e60f8db5SAlex Xie 		value = true;
1313e60f8db5SAlex Xie 
131420bf2f6fSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
131508546895SZhigang Luo 		gfxhub_v1_0_set_fault_enable_default(adev, value);
131651cce480SLe Ma 		if (adev->asic_type == CHIP_ARCTURUS)
131751cce480SLe Ma 			mmhub_v9_4_set_fault_enable_default(adev, value);
131851cce480SLe Ma 		else
1319e60f8db5SAlex Xie 			mmhub_v1_0_set_fault_enable_default(adev, value);
132020bf2f6fSZhigang Luo 	}
13213ff98548SOak Zeng 	for (i = 0; i < adev->num_vmhubs; ++i)
13223ff98548SOak Zeng 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1323e60f8db5SAlex Xie 
1324e7da754bSMonk Liu 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
1325e7da754bSMonk Liu 		adev->umc.funcs->init_registers(adev);
1326e7da754bSMonk Liu 
1327e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1328e60f8db5SAlex Xie 
1329e60f8db5SAlex Xie 	return r;
1330e60f8db5SAlex Xie }
1331e60f8db5SAlex Xie 
1332e60f8db5SAlex Xie /**
1333e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1334e60f8db5SAlex Xie  *
1335e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1336e60f8db5SAlex Xie  *
1337e60f8db5SAlex Xie  * This disables all VM page table.
1338e60f8db5SAlex Xie  */
1339e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1340e60f8db5SAlex Xie {
1341e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
134251cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
134351cce480SLe Ma 		mmhub_v9_4_gart_disable(adev);
134451cce480SLe Ma 	else
1345e60f8db5SAlex Xie 		mmhub_v1_0_gart_disable(adev);
1346ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1347e60f8db5SAlex Xie }
1348e60f8db5SAlex Xie 
1349e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1350e60f8db5SAlex Xie {
1351e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1352e60f8db5SAlex Xie 
13535dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
13545dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
13555dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
13565dd696aeSTrigger Huang 		return 0;
13575dd696aeSTrigger Huang 	}
13585dd696aeSTrigger Huang 
1359791c4769Sxinhui pan 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1360770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1361e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1362e60f8db5SAlex Xie 
1363e60f8db5SAlex Xie 	return 0;
1364e60f8db5SAlex Xie }
1365e60f8db5SAlex Xie 
1366e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1367e60f8db5SAlex Xie {
1368e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1369e60f8db5SAlex Xie 
1370f053cd47STom St Denis 	return gmc_v9_0_hw_fini(adev);
1371e60f8db5SAlex Xie }
1372e60f8db5SAlex Xie 
1373e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1374e60f8db5SAlex Xie {
1375e60f8db5SAlex Xie 	int r;
1376e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1377e60f8db5SAlex Xie 
1378e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1379e60f8db5SAlex Xie 	if (r)
1380e60f8db5SAlex Xie 		return r;
1381e60f8db5SAlex Xie 
1382620f774fSChristian König 	amdgpu_vmid_reset_all(adev);
1383e60f8db5SAlex Xie 
138432601d48SChristian König 	return 0;
1385e60f8db5SAlex Xie }
1386e60f8db5SAlex Xie 
1387e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1388e60f8db5SAlex Xie {
1389e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1390e60f8db5SAlex Xie 	return true;
1391e60f8db5SAlex Xie }
1392e60f8db5SAlex Xie 
1393e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1394e60f8db5SAlex Xie {
1395e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9.*/
1396e60f8db5SAlex Xie 	return 0;
1397e60f8db5SAlex Xie }
1398e60f8db5SAlex Xie 
1399e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1400e60f8db5SAlex Xie {
1401e60f8db5SAlex Xie 	/* XXX for emulation.*/
1402e60f8db5SAlex Xie 	return 0;
1403e60f8db5SAlex Xie }
1404e60f8db5SAlex Xie 
1405e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1406e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1407e60f8db5SAlex Xie {
1408d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1409d5583d4fSHuang Rui 
141051cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1411cb15e804SLe Ma 		mmhub_v9_4_set_clockgating(adev, state);
1412cb15e804SLe Ma 	else
1413bee7b51aSLe Ma 		mmhub_v1_0_set_clockgating(adev, state);
1414bee7b51aSLe Ma 
1415bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1416bee7b51aSLe Ma 
1417bee7b51aSLe Ma 	return 0;
1418e60f8db5SAlex Xie }
1419e60f8db5SAlex Xie 
142013052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
142113052be5SHuang Rui {
142213052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
142313052be5SHuang Rui 
142451cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1425cb15e804SLe Ma 		mmhub_v9_4_get_clockgating(adev, flags);
1426cb15e804SLe Ma 	else
142713052be5SHuang Rui 		mmhub_v1_0_get_clockgating(adev, flags);
1428bee7b51aSLe Ma 
1429bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
143013052be5SHuang Rui }
143113052be5SHuang Rui 
1432e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1433e60f8db5SAlex Xie 					enum amd_powergating_state state)
1434e60f8db5SAlex Xie {
1435e60f8db5SAlex Xie 	return 0;
1436e60f8db5SAlex Xie }
1437e60f8db5SAlex Xie 
1438e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1439e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1440e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1441e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1442e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1443e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1444e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1445e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1446e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1447e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1448e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1449e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1450e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1451e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1452e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
145313052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1454e60f8db5SAlex Xie };
1455e60f8db5SAlex Xie 
1456e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1457e60f8db5SAlex Xie {
1458e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1459e60f8db5SAlex Xie 	.major = 9,
1460e60f8db5SAlex Xie 	.minor = 0,
1461e60f8db5SAlex Xie 	.rev = 0,
1462e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1463e60f8db5SAlex Xie };
1464