xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 90f6452c)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
535b6b35aaSHawking Zhang #include "umc_v6_1.h"
54e7da754bSMonk Liu #include "umc_v6_0.h"
55e60f8db5SAlex Xie 
5644a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5744a99b65SAndrey Grodzovsky 
58791c4769Sxinhui pan #include "amdgpu_ras.h"
59029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
60791c4769Sxinhui pan 
61ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
62ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
63ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
64ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
65ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
66ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
67ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
68ebdef28eSAlex Deucher 
69e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
70e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
71e60f8db5SAlex Xie 
/*
 * HDP golden register settings, laid out as triplets — presumably
 * { register offset, AND mask, OR value } per the usual amdgpu
 * golden-settings convention (TODO: confirm against the consumer).
 * Every entry here clears the masked bits (OR value is 0).
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
85e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register settings: { hwip, instance, reg, mask, value }. */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
915c583018SEvan Quan 
/* ATHUB 1.0.0 golden register settings for the RPB arbiter control registers. */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
975c583018SEvan Quan 
/*
 * Absolute offsets of the per-channel MCUMC control registers toggled by
 * gmc_v9_0_ecc_interrupt_state().  Eight bases (0x000143c0, 0x000543c0,
 * ... 0x001d43c0) with four instances each, 0x800 apart — presumably one
 * register per UMC channel (TODO: confirm against the UMC register spec).
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
13202bab923SDavid Panariti 
/*
 * Companion mask registers to ecc_umc_mcumc_ctrl_addrs (same layout, each
 * base shifted from 0x..43c0 to 0x..43e0); toggled together with the
 * control registers in gmc_v9_0_ecc_interrupt_state().
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16702bab923SDavid Panariti 
/*
 * Per-channel MCUMC status register offsets (0x..43c2), mirroring the
 * control/mask tables above.  Not referenced anywhere in the code visible
 * in this file chunk — presumably consumed by the ECC/RAS query paths
 * elsewhere (TODO: verify the consumer before removing).
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
20202bab923SDavid Panariti 
203791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
204791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
205791c4769Sxinhui pan 		unsigned type,
206791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
207791c4769Sxinhui pan {
208791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
209791c4769Sxinhui pan 
210791c4769Sxinhui pan 	bits = 0x7f;
211791c4769Sxinhui pan 
212791c4769Sxinhui pan 	switch (state) {
213791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
214791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
215791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
216791c4769Sxinhui pan 			tmp = RREG32(reg);
217791c4769Sxinhui pan 			tmp &= ~bits;
218791c4769Sxinhui pan 			WREG32(reg, tmp);
219791c4769Sxinhui pan 		}
220791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
221791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
222791c4769Sxinhui pan 			tmp = RREG32(reg);
223791c4769Sxinhui pan 			tmp &= ~bits;
224791c4769Sxinhui pan 			WREG32(reg, tmp);
225791c4769Sxinhui pan 		}
226791c4769Sxinhui pan 		break;
227791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
228791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
229791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
230791c4769Sxinhui pan 			tmp = RREG32(reg);
231791c4769Sxinhui pan 			tmp |= bits;
232791c4769Sxinhui pan 			WREG32(reg, tmp);
233791c4769Sxinhui pan 		}
234791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
235791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
236791c4769Sxinhui pan 			tmp = RREG32(reg);
237791c4769Sxinhui pan 			tmp |= bits;
238791c4769Sxinhui pan 			WREG32(reg, tmp);
239791c4769Sxinhui pan 		}
240791c4769Sxinhui pan 		break;
241791c4769Sxinhui pan 	default:
242791c4769Sxinhui pan 		break;
243791c4769Sxinhui pan 	}
244791c4769Sxinhui pan 
245791c4769Sxinhui pan 	return 0;
246791c4769Sxinhui pan }
247791c4769Sxinhui pan 
248e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
249e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
250e60f8db5SAlex Xie 					unsigned type,
251e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
252e60f8db5SAlex Xie {
253e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
254ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
255e60f8db5SAlex Xie 
25611250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
25711250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
25811250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
25911250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26011250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26111250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26211250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
26311250164SChristian König 
264e60f8db5SAlex Xie 	switch (state) {
265e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
2661daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
267ae6d1416STom St Denis 			hub = &adev->vmhub[j];
268e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
269e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
270e60f8db5SAlex Xie 				tmp = RREG32(reg);
271e60f8db5SAlex Xie 				tmp &= ~bits;
272e60f8db5SAlex Xie 				WREG32(reg, tmp);
273e60f8db5SAlex Xie 			}
274e60f8db5SAlex Xie 		}
275e60f8db5SAlex Xie 		break;
276e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
2771daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
278ae6d1416STom St Denis 			hub = &adev->vmhub[j];
279e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
280e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
281e60f8db5SAlex Xie 				tmp = RREG32(reg);
282e60f8db5SAlex Xie 				tmp |= bits;
283e60f8db5SAlex Xie 				WREG32(reg, tmp);
284e60f8db5SAlex Xie 			}
285e60f8db5SAlex Xie 		}
286e60f8db5SAlex Xie 	default:
287e60f8db5SAlex Xie 		break;
288e60f8db5SAlex Xie 	}
289e60f8db5SAlex Xie 
290e60f8db5SAlex Xie 	return 0;
291e60f8db5SAlex Xie }
292e60f8db5SAlex Xie 
/*
 * gmc_v9_0_process_interrupt - handle a VM protection-fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the entry arrived on (unused here)
 * @entry: decoded interrupt-vector entry
 *
 * Reassembles the faulting address from the IV entry, gives the retry-fault
 * filter and handler a chance to consume the fault, then (on bare metal)
 * latches and clears the hub's protection-fault status register and prints
 * a rate-limited diagnostic.  Returns 1 when the retry paths consumed the
 * fault (which also prevents sending it to KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* bit 7 of src_data[1] distinguishes retry faults from no-retry ones */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	/* page-aligned fault address: bits 43:12 come from src_data[0],
	 * bits 47:44 from the low nibble of src_data[1]
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* drop retry faults already seen for this address/pasid recently */
	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* map the IH client id to the VM hub that raised the fault */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		/* latch the status, then write bit 0 of the fault control
		 * register to clear it for the next fault
		 */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* rate-limited so a fault storm cannot flood the kernel log */
	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			/* decode the latched status fields for the log */
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));

		}
	}

	return 0;
}
379e60f8db5SAlex Xie 
/* VM fault interrupt source callbacks (hooked up in gmc_v9_0_set_irq_funcs). */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
384e60f8db5SAlex Xie 
385791c4769Sxinhui pan 
/* ECC interrupt source callbacks; processing is delegated to the UMC layer. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
390791c4769Sxinhui pan 
391e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
392e60f8db5SAlex Xie {
393770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
394770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
395791c4769Sxinhui pan 
396791c4769Sxinhui pan 	adev->gmc.ecc_irq.num_types = 1;
397791c4769Sxinhui pan 	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
398e60f8db5SAlex Xie }
399e60f8db5SAlex Xie 
4002a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4012a79d868SYong Zhao 					uint32_t flush_type)
40203f89febSChristian König {
40303f89febSChristian König 	u32 req = 0;
40403f89febSChristian König 
40503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
406c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4072a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
40803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
40903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
41003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
41103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
41203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
41303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
41403f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
41503f89febSChristian König 
41603f89febSChristian König 	return req;
41703f89febSChristian König }
41803f89febSChristian König 
41990f6452cSchangzhu /**
42090f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
42190f6452cSchangzhu  *
42290f6452cSchangzhu  * @adev: amdgpu_device pointer
42390f6452cSchangzhu  * @vmhub: vmhub type
42490f6452cSchangzhu  *
42590f6452cSchangzhu  */
42690f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
42790f6452cSchangzhu 				       uint32_t vmhub)
42890f6452cSchangzhu {
42990f6452cSchangzhu 	return ((vmhub == AMDGPU_MMHUB_0 ||
43090f6452cSchangzhu 		 vmhub == AMDGPU_MMHUB_1) &&
43190f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
43290f6452cSchangzhu 		(!(adev->asic_type == CHIP_RAVEN &&
43390f6452cSchangzhu 		   adev->rev_id < 0x8 &&
43490f6452cSchangzhu 		   adev->pdev->device == 0x15d8)));
43590f6452cSchangzhu }
43690f6452cSchangzhu 
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which VM hub (gfxhub/mmhub index) to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	/* invalidation engine used for these direct flushes — presumably
	 * reserved for the driver; confirm against the vm_inv_eng assignment
	 */
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		/* route the request/ack handshake through the KIQ ring */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	/* serialize direct register access to the invalidation engine */
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose gpuvm invalidate acknowldege state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acuqire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	/* kick off the invalidation */
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* poll for the per-VMID ack bit */
	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
534e60f8db5SAlex Xie 
/*
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the commands on
 * @vmid: vm instance to flush
 * @pd_addr: new page-directory base address for @vmid
 *
 * Emits register writes that update the VMID's page-table base and then
 * trigger and wait for a TLB invalidation on the ring's VM hub, bracketed
 * by the semaphore acquire/release workaround when required.  Returns
 * @pd_addr unchanged.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	/* flush type 0; see gmc_v9_0_get_invalidate_req() for the encoding */
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowldege state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acuqire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	/* program the new page-directory base (two 32-bit halves per VMID) */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* write the invalidate request and wait for the per-VMID ack bit */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}
5779096d6e5SChristian König 
578c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
579c633c00bSChristian König 					unsigned pasid)
580c633c00bSChristian König {
581c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
582c633c00bSChristian König 	uint32_t reg;
583c633c00bSChristian König 
584f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
585f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
586f2d66571SLe Ma 		return;
587f2d66571SLe Ma 
588a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
589c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
590c633c00bSChristian König 	else
591c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
592c633c00bSChristian König 
593c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
594c633c00bSChristian König }
595c633c00bSChristian König 
596e60f8db5SAlex Xie /*
597e60f8db5SAlex Xie  * PTE format on VEGA 10:
598e60f8db5SAlex Xie  * 63:59 reserved
599e60f8db5SAlex Xie  * 58:57 mtype
600e60f8db5SAlex Xie  * 56 F
601e60f8db5SAlex Xie  * 55 L
602e60f8db5SAlex Xie  * 54 P
603e60f8db5SAlex Xie  * 53 SW
604e60f8db5SAlex Xie  * 52 T
605e60f8db5SAlex Xie  * 50:48 reserved
606e60f8db5SAlex Xie  * 47:12 4k physical page base address
607e60f8db5SAlex Xie  * 11:7 fragment
608e60f8db5SAlex Xie  * 6 write
609e60f8db5SAlex Xie  * 5 read
610e60f8db5SAlex Xie  * 4 exe
611e60f8db5SAlex Xie  * 3 Z
612e60f8db5SAlex Xie  * 2 snooped
613e60f8db5SAlex Xie  * 1 system
614e60f8db5SAlex Xie  * 0 valid
615e60f8db5SAlex Xie  *
616e60f8db5SAlex Xie  * PDE format on VEGA 10:
617e60f8db5SAlex Xie  * 63:59 block fragment size
618e60f8db5SAlex Xie  * 58:55 reserved
619e60f8db5SAlex Xie  * 54 P
620e60f8db5SAlex Xie  * 53:48 reserved
621e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
622e60f8db5SAlex Xie  * 5:3 reserved
623e60f8db5SAlex Xie  * 2 C
624e60f8db5SAlex Xie  * 1 system
625e60f8db5SAlex Xie  * 0 valid
626e60f8db5SAlex Xie  */
627e60f8db5SAlex Xie 
62871776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
629e60f8db5SAlex Xie 
630e60f8db5SAlex Xie {
63171776b6dSChristian König 	switch (flags) {
632e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
63371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
634e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
63571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
636e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
63771776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
638093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
63971776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
640e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
64171776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
642e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
64371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
644e60f8db5SAlex Xie 	default:
64571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
646e60f8db5SAlex Xie 	}
647e60f8db5SAlex Xie }
648e60f8db5SAlex Xie 
/*
 * Fix up a PDE before it is written to the page table: translate VRAM
 * addresses into the MC address space and, when 2-level translation is
 * enabled, set the per-level control bits (block fragment size / TF).
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* Entries that are neither huge-page PTEs nor system memory point at
	 * VRAM: rebase them from the VRAM aperture to the MC address space. */
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* The address must be 64-byte aligned and within the 48-bit MC range. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* PDB0 entries either act as a PTE directly (clear the PDE_PTE
		 * marker) or require further translation (set the TF bit). */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
672f75e237cSChristian König 
/*
 * Derive the final PTE flags for a mapping: take the executable bit and
 * memory type from the mapping's own flags, handle PRT entries, and mark
 * XGMI-remote VRAM as snooped on Arcturus.
 */
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	/* Executable bit comes from the mapping, not the caller's defaults. */
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	/* Same for the VEGA10 memory-type field. */
	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	/* PRT mappings are intentionally invalid so faults are redirected. */
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	/* On Arcturus, VRAM reached over XGMI must be snooped for coherency. */
	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}
693cbfae36cSChristian König 
/* GMC callback table wired into the core VM/GART code via
 * gmc_v9_0_set_gmc_funcs(). */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};
702e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
707e60f8db5SAlex Xie 
7085b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
7095b6b35aaSHawking Zhang {
7105b6b35aaSHawking Zhang 	switch (adev->asic_type) {
711e7da754bSMonk Liu 	case CHIP_VEGA10:
712e7da754bSMonk Liu 		adev->umc.funcs = &umc_v6_0_funcs;
713e7da754bSMonk Liu 		break;
7145b6b35aaSHawking Zhang 	case CHIP_VEGA20:
7153aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
7163aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
7173aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
7183aacf4eaSTao Zhou 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
7193aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
720045c0216STao Zhou 		adev->umc.funcs = &umc_v6_1_funcs;
7215b6b35aaSHawking Zhang 		break;
7225b6b35aaSHawking Zhang 	default:
7235b6b35aaSHawking Zhang 		break;
7245b6b35aaSHawking Zhang 	}
7255b6b35aaSHawking Zhang }
7265b6b35aaSHawking Zhang 
7273d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
7283d093da0STao Zhou {
7293d093da0STao Zhou 	switch (adev->asic_type) {
7303d093da0STao Zhou 	case CHIP_VEGA20:
731d65bf1f8STao Zhou 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
7323d093da0STao Zhou 		break;
7333d093da0STao Zhou 	default:
7343d093da0STao Zhou 		break;
7353d093da0STao Zhou 	}
7363d093da0STao Zhou }
7373d093da0STao Zhou 
/*
 * Early IP-block init: wire up the GMC/IRQ/UMC/MMHUB callback tables and
 * define the fixed 4GB shared and private GPUVM apertures.
 */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	/* Both apertures are 4GB windows at fixed virtual addresses. */
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
756e60f8db5SAlex Xie 
757cd2b5623SAlex Deucher static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
758cd2b5623SAlex Deucher {
759cd2b5623SAlex Deucher 
760cd2b5623SAlex Deucher 	/*
761cd2b5623SAlex Deucher 	 * TODO:
762cd2b5623SAlex Deucher 	 * Currently there is a bug where some memory client outside
763cd2b5623SAlex Deucher 	 * of the driver writes to first 8M of VRAM on S3 resume,
764cd2b5623SAlex Deucher 	 * this overrides GART which by default gets placed in first 8M and
765cd2b5623SAlex Deucher 	 * causes VM_FAULTS once GTT is accessed.
766cd2b5623SAlex Deucher 	 * Keep the stolen memory reservation until the while this is not solved.
767cd2b5623SAlex Deucher 	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
768cd2b5623SAlex Deucher 	 */
769cd2b5623SAlex Deucher 	switch (adev->asic_type) {
77095010ba7SAlex Deucher 	case CHIP_VEGA10:
7716abc0c8fSAlex Deucher 	case CHIP_RAVEN:
772bfa3a9bbSHawking Zhang 	case CHIP_ARCTURUS:
7738787ee01SHuang Rui 	case CHIP_RENOIR:
77402122753SFlora Cui 		return true;
7756abc0c8fSAlex Deucher 	case CHIP_VEGA12:
776cd2b5623SAlex Deucher 	case CHIP_VEGA20:
777cd2b5623SAlex Deucher 	default:
7786abc0c8fSAlex Deucher 		return false;
779cd2b5623SAlex Deucher 	}
780cd2b5623SAlex Deucher }
781cd2b5623SAlex Deucher 
782c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
783c713a461SEvan Quan {
784c713a461SEvan Quan 	struct amdgpu_ring *ring;
785c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
786c8a6e2a3SLe Ma 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
787c8a6e2a3SLe Ma 		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
788c713a461SEvan Quan 	unsigned i;
789c713a461SEvan Quan 	unsigned vmhub, inv_eng;
790c713a461SEvan Quan 
791c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
792c713a461SEvan Quan 		ring = adev->rings[i];
793c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
794c713a461SEvan Quan 
795c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
796c713a461SEvan Quan 		if (!inv_eng) {
797c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
798c713a461SEvan Quan 				ring->name);
799c713a461SEvan Quan 			return -EINVAL;
800c713a461SEvan Quan 		}
801c713a461SEvan Quan 
802c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
80372464382SChristian König 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
804c713a461SEvan Quan 
805c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
806c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
807c713a461SEvan Quan 	}
808c713a461SEvan Quan 
809c713a461SEvan Quan 	return 0;
810c713a461SEvan Quan }
811c713a461SEvan Quan 
/*
 * Late IP-block init: release the stolen-memory reservation where safe,
 * assign TLB invalidation engines, probe VRAM/SRAM ECC support on bare
 * metal, set up RAS, and finally enable the VM fault interrupt.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* Only free the stolen VGA memory when this ASIC doesn't need the
	 * workaround reservation (see gmc_v9_0_keep_stolen_memory). */
	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			/* r here is a capability flag, not an error code. */
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
855e60f8db5SAlex Xie 
/*
 * Place VRAM, GART and AGP within the GPU's physical address space and
 * compute the VRAM base offset used when writing page-table entries.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	/* The FB location comes from the MMHUB, except under SR-IOV where
	 * the non-Arcturus path leaves it at 0. */
	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
878e60f8db5SAlex Xie 
879e60f8db5SAlex Xie /**
880e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
881e60f8db5SAlex Xie  *
882e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
883e60f8db5SAlex Xie  *
884e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
885e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
886e60f8db5SAlex Xie  * Returns 0 for success.
887e60f8db5SAlex Xie  */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* Try to grow the FB BAR to cover all of VRAM (dGPU only). */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* APUs carve VRAM out of system memory; use the MC FB offset
	 * instead of the PCI BAR. */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		/* No override from the module parameter: pick a per-ASIC
		 * default (1GB where DCE scatter/gather needs it, else 512MB). */
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
939e60f8db5SAlex Xie 
/*
 * Allocate the GART page table in VRAM and set the default PTE flags
 * (uncached, executable) used for GART entries.
 */
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	/* Already initialized: warn but treat as success. */
	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	/* 8 bytes per 64-bit PTE. */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
957e60f8db5SAlex Xie 
/*
 * Determine how much VRAM the pre-OS console (vBIOS/VGA) is using so it
 * can be reserved.  Returns 0 when the pre-OS buffer would consume almost
 * all of VRAM (nothing worth reserving).
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		/* Compute the active scanout size (height * width * 4 bytes
		 * per pixel) from the display viewport registers; DCN ASICs
		 * (Raven/Renoir) use different registers than DCE. */
		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
1003ebdef28eSAlex Deucher 
/*
 * Software init: probe VRAM parameters, size the VM address space per
 * ASIC, register fault/ECC interrupts, set the DMA mask, initialize the
 * memory manager, GART and VM manager.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	/* NOTE(review): the return value of this call is overwritten before
	 * it is checked; vram_width stays 0 on failure and is handled by the
	 * fallback below. */
	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcord is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	/* Fallback: derive the width from the HBM channel count. */
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus has a second VMC client for its extra MMHUB. */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1157e60f8db5SAlex Xie 
/*
 * Software teardown: RAS, GEM objects and VM manager first, then the
 * stolen VGA reservation (if still held), then GART and the BO manager.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* The reservation was kept past late_init on these ASICs; free it now. */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1176e60f8db5SAlex Xie 
/*
 * Program per-ASIC "golden" register values for the MMHUB and ATHUB.
 * Note the intentional fall-through: bare-metal Vega10 shares the Vega20
 * sequences, while SR-IOV Vega10 programs nothing.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1205e60f8db5SAlex Xie 
1206e60f8db5SAlex Xie /**
1207e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1208e60f8db5SAlex Xie  *
1209e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1210e60f8db5SAlex Xie  */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* Pin the page table so it cannot be evicted while the GART is live. */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	/* Arcturus uses the v9.4 MMHUB; everything else here uses v1.0. */
	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
1240cb1545f7SOak Zeng 
/* hw_init hook: program golden registers, HDP and fault policy, flush
 * TLBs and finally enable the GART.  The ordering below is deliberate.
 */
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture*/
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	/* HDP golden settings shared by all GMC v9 parts. */
	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	/* Per-ASIC extras on top of the common sequence. */
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* Read-back then write of the same value -- NOTE(review): presumably
	 * re-latches HDP_HOST_PATH_CNTL after the golden settings; confirm.
	 */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* Point the HDP non-surface aperture at the start of VRAM. */
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	/* value selects the hubs' default fault handling; false only when
	 * the amdgpu_vm_fault_stop parameter is AMDGPU_VM_FAULT_STOP_ALWAYS.
	 */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	/* Flush the TLB of every VM hub for VMID 0. */
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	/* Optional UMC (memory controller RAS) register setup. */
	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
1309e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	/* Arcturus carries the v9.4 MMHUB; every other GMC v9 part uses v1.0. */
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	/* Balance the pin taken in gmc_v9_0_gart_enable(). */
	amdgpu_gart_table_vram_unpin(adev);
}
1326e60f8db5SAlex Xie 
/* hw_fini hook: tear down GMC hardware state (also reused by suspend). */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* Drop the interrupt references before disabling the GART. */
	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1343e60f8db5SAlex Xie 
/* suspend hook: GMC suspend is exactly the hw_fini teardown. */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1350e60f8db5SAlex Xie 
/* resume hook: re-run hw_init, then invalidate all VMIDs so stale
 * pre-suspend translations are never reused.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = gmc_v9_0_hw_init(adev);
	if (ret)
		return ret;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1364e60f8db5SAlex Xie 
/* is_idle hook: the GMC v9 memory controller never reports busy. */
static bool gmc_v9_0_is_idle(void *handle)
{
	return true;
}
1370e60f8db5SAlex Xie 
/* wait_for_idle hook: nothing to wait for, MC is always idle on GMC v9. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	return 0;
}
1376e60f8db5SAlex Xie 
/* soft_reset hook: no-op placeholder (XXX kept for emulation bring-up). */
static int gmc_v9_0_soft_reset(void *handle)
{
	return 0;
}
1382e60f8db5SAlex Xie 
1383e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1384e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1385e60f8db5SAlex Xie {
1386d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1387d5583d4fSHuang Rui 
138851cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1389cb15e804SLe Ma 		mmhub_v9_4_set_clockgating(adev, state);
1390cb15e804SLe Ma 	else
1391bee7b51aSLe Ma 		mmhub_v1_0_set_clockgating(adev, state);
1392bee7b51aSLe Ma 
1393bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1394bee7b51aSLe Ma 
1395bee7b51aSLe Ma 	return 0;
1396e60f8db5SAlex Xie }
1397e60f8db5SAlex Xie 
139813052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
139913052be5SHuang Rui {
140013052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
140113052be5SHuang Rui 
140251cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1403cb15e804SLe Ma 		mmhub_v9_4_get_clockgating(adev, flags);
1404cb15e804SLe Ma 	else
140513052be5SHuang Rui 		mmhub_v1_0_get_clockgating(adev, flags);
1406bee7b51aSLe Ma 
1407bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
140813052be5SHuang Rui }
140913052be5SHuang Rui 
/* set_powergating_state hook: no powergating controls are programmed
 * here for GMC v9; always succeeds.
 */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1415e60f8db5SAlex Xie 
/* amd_ip_funcs dispatch table wiring the GMC v9 hooks into the IP
 * block framework.
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1433e60f8db5SAlex Xie 
/* IP block registration record: GMC v9.0.0 with the dispatch table above
 * pointed to by .funcs.
 */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1442