xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 067e75b3)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24fd5fd480SChunming Zhou #include <drm/drm_cache.h>
25e60f8db5SAlex Xie #include "amdgpu.h"
26e60f8db5SAlex Xie #include "gmc_v9_0.h"
278d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
282cddc50eSHuang Rui #include "amdgpu_gem.h"
29e60f8db5SAlex Xie 
3075199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3175199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
32cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
33135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
34135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
35fb960bd2SFeifei Xu #include "vega10_enum.h"
3665417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
376ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
38250b4228SChristian König #include "oss/osssys_4_0_offset.h"
39e60f8db5SAlex Xie 
40946a4d5bSShaoyun Liu #include "soc15.h"
41e60f8db5SAlex Xie #include "soc15_common.h"
4290c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
43e60f8db5SAlex Xie 
44e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
45e60f8db5SAlex Xie #include "mmhub_v1_0.h"
46bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
47e60f8db5SAlex Xie 
4844a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
4944a99b65SAndrey Grodzovsky 
50791c4769Sxinhui pan #include "amdgpu_ras.h"
51791c4769Sxinhui pan 
52ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
53ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
54ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
55ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
56ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
57ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
58ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
59ebdef28eSAlex Deucher 
60e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
61e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
62e60f8db5SAlex Xie 
/*
 * HDP golden register settings for Vega10, stored as flat triplets of
 * { register offset, and-mask, new value }.  Each entry clears the low
 * 28 bits of the register (mask 0x0fffffff, value 0).
 * NOTE(review): triplet layout inferred from the usual golden-settings
 * helpers — confirm against the consumer of this table.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
76e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden settings: DAGB write-client and EA DRAM write
 * client-to-group mapping tweaks, applied via the soc15 golden-value
 * helpers (mask/value pairs per register). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
825c583018SEvan Quan 
/* ATHUB 1.0.0 golden settings: RPB arbitration control tweaks
 * (mask/value pairs per register). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
885c583018SEvan Quan 
/*
 * UMC MCUMC_CTRL register addresses, one per UMC channel, written as
 * (instance base + channel offset): eight instance bases stepping by
 * 0x00040000 and four channel offsets stepping by 0x800.  The ECC
 * interrupt-state handler below toggles the low seven bits of each.
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
12302bab923SDavid Panariti 
/*
 * UMC MCUMC_CTRL mask register addresses; same instance/channel layout
 * as ecc_umc_mcumc_ctrl_addrs but at base offset ...43e0.  Toggled in
 * lockstep with the control registers by the ECC interrupt handler.
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
15802bab923SDavid Panariti 
/*
 * UMC MCUMC status register addresses; same instance/channel layout as
 * the control tables above but at base offset ...43c2.
 * NOTE(review): not referenced in this chunk — presumably read by the
 * ECC error-query path elsewhere in the file.
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
19302bab923SDavid Panariti 
194791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
195791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
196791c4769Sxinhui pan 		unsigned type,
197791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
198791c4769Sxinhui pan {
199791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
200791c4769Sxinhui pan 
201791c4769Sxinhui pan 	bits = 0x7f;
202791c4769Sxinhui pan 
203791c4769Sxinhui pan 	switch (state) {
204791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
205791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
206791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
207791c4769Sxinhui pan 			tmp = RREG32(reg);
208791c4769Sxinhui pan 			tmp &= ~bits;
209791c4769Sxinhui pan 			WREG32(reg, tmp);
210791c4769Sxinhui pan 		}
211791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
212791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
213791c4769Sxinhui pan 			tmp = RREG32(reg);
214791c4769Sxinhui pan 			tmp &= ~bits;
215791c4769Sxinhui pan 			WREG32(reg, tmp);
216791c4769Sxinhui pan 		}
217791c4769Sxinhui pan 		break;
218791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
219791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
220791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
221791c4769Sxinhui pan 			tmp = RREG32(reg);
222791c4769Sxinhui pan 			tmp |= bits;
223791c4769Sxinhui pan 			WREG32(reg, tmp);
224791c4769Sxinhui pan 		}
225791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
226791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
227791c4769Sxinhui pan 			tmp = RREG32(reg);
228791c4769Sxinhui pan 			tmp |= bits;
229791c4769Sxinhui pan 			WREG32(reg, tmp);
230791c4769Sxinhui pan 		}
231791c4769Sxinhui pan 		break;
232791c4769Sxinhui pan 	default:
233791c4769Sxinhui pan 		break;
234791c4769Sxinhui pan 	}
235791c4769Sxinhui pan 
236791c4769Sxinhui pan 	return 0;
237791c4769Sxinhui pan }
238791c4769Sxinhui pan 
/*
 * RAS data callback for UMC ECC errors: flag the SRAM ECC event to KFD,
 * then request a GPU reset.  Always reports the event as an
 * uncorrectable error (AMDGPU_RAS_UE).
 */
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev, 0);
	return AMDGPU_RAS_UE;
}
246791c4769Sxinhui pan 
/*
 * ECC interrupt handler: forward the IV entry to the RAS interrupt
 * dispatcher.  If the GMC block has no RAS context registered
 * (gmc.ras_if == NULL) the interrupt is silently dropped.
 * Always returns 0.
 */
static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	/* The dispatcher routes on the RAS block descriptor in .head. */
	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
264791c4769Sxinhui pan 
265e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
266e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
267e60f8db5SAlex Xie 					unsigned type,
268e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
269e60f8db5SAlex Xie {
270e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
271ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
272e60f8db5SAlex Xie 
27311250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27411250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27511250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27611250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27711250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27811250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27911250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
28011250164SChristian König 
281e60f8db5SAlex Xie 	switch (state) {
282e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
283ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
284ae6d1416STom St Denis 			hub = &adev->vmhub[j];
285e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
286e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
287e60f8db5SAlex Xie 				tmp = RREG32(reg);
288e60f8db5SAlex Xie 				tmp &= ~bits;
289e60f8db5SAlex Xie 				WREG32(reg, tmp);
290e60f8db5SAlex Xie 			}
291e60f8db5SAlex Xie 		}
292e60f8db5SAlex Xie 		break;
293e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
294ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
295ae6d1416STom St Denis 			hub = &adev->vmhub[j];
296e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
297e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
298e60f8db5SAlex Xie 				tmp = RREG32(reg);
299e60f8db5SAlex Xie 				tmp |= bits;
300e60f8db5SAlex Xie 				WREG32(reg, tmp);
301e60f8db5SAlex Xie 			}
302e60f8db5SAlex Xie 		}
303e60f8db5SAlex Xie 	default:
304e60f8db5SAlex Xie 		break;
305e60f8db5SAlex Xie 	}
306e60f8db5SAlex Xie 
307e60f8db5SAlex Xie 	return 0;
308e60f8db5SAlex Xie }
309e60f8db5SAlex Xie 
/*
 * VM fault interrupt handler: decode the faulting address and fault
 * flavor from the IV entry, de-duplicate retry faults, latch and clear
 * the hub's L2 protection fault status, and rate-limited-log the fault
 * with the offending task's identity.
 *
 * Returns 1 when a retry fault was filtered as a duplicate (which also
 * keeps it away from KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	/* Bit 7 of src_data[1] distinguishes retryable from fatal faults. */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;

	/* Page address: low 48 bits split across the two src_data words,
	 * page-aligned (low 12 bits implicitly zero). */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		/* Read the latched fault info, then write-1-to-clear bit 0
		 * of the fault control register to re-arm it. */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		/* Status is only valid on bare metal; see the read above. */
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
356e60f8db5SAlex Xie 
/* IRQ source callbacks for VM protection faults. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
361e60f8db5SAlex Xie 
362791c4769Sxinhui pan 
/* IRQ source callbacks for UMC ECC errors. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};
367791c4769Sxinhui pan 
/* Hook up the VM-fault and ECC interrupt sources on the device. */
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
376e60f8db5SAlex Xie 
3772a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
3782a79d868SYong Zhao 					uint32_t flush_type)
37903f89febSChristian König {
38003f89febSChristian König 	u32 req = 0;
38103f89febSChristian König 
38203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
383c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
3842a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
38503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
38603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
38703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
38803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
38903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
39003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
39103f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
39203f89febSChristian König 
39303f89febSChristian König 	return req;
39403f89febSChristian König }
39503f89febSChristian König 
396e60f8db5SAlex Xie /*
397e60f8db5SAlex Xie  * GART
398e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
399e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
400e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
401e60f8db5SAlex Xie  */
402e60f8db5SAlex Xie 
403e60f8db5SAlex Xie /**
4042a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
405e60f8db5SAlex Xie  *
406e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
407e60f8db5SAlex Xie  * @vmid: vm instance to flush
4082a79d868SYong Zhao  * @flush_type: the flush type
409e60f8db5SAlex Xie  *
4102a79d868SYong Zhao  * Flush the TLB for the requested page table using certain type.
411e60f8db5SAlex Xie  */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	/* Invalidation engine 17 is reserved for this synchronous path;
	 * rings use their own per-ring engines (see vm_inv_eng). */
	const unsigned eng = 17;
	unsigned i, j;

	/* Flush the TLB on every VM hub (gfxhub and mmhub). */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			/* Route the request/ack handshake through the KIQ
			 * and move on to the next hub. */
			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		/* Direct MMIO path: serialize against other invalidations,
		 * then poll the ack register for this VMID's bit. */
		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}
451e60f8db5SAlex Xie 
/*
 * Emit a TLB flush on @ring for @vmid: program the context's page
 * directory base (low then high dword), then emit a write-and-wait on
 * the ring's own invalidation engine.  Returns @pd_addr unchanged so
 * callers can chain it.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	/* Flush type 0 (legacy) for the ring path. */
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/* Each context uses two consecutive PTB address registers. */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
4729096d6e5SChristian König 
473c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
474c633c00bSChristian König 					unsigned pasid)
475c633c00bSChristian König {
476c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
477c633c00bSChristian König 	uint32_t reg;
478c633c00bSChristian König 
479c633c00bSChristian König 	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
480c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
481c633c00bSChristian König 	else
482c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
483c633c00bSChristian König 
484c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
485c633c00bSChristian König }
486c633c00bSChristian König 
487e60f8db5SAlex Xie /*
488e60f8db5SAlex Xie  * PTE format on VEGA 10:
489e60f8db5SAlex Xie  * 63:59 reserved
490e60f8db5SAlex Xie  * 58:57 mtype
491e60f8db5SAlex Xie  * 56 F
492e60f8db5SAlex Xie  * 55 L
493e60f8db5SAlex Xie  * 54 P
494e60f8db5SAlex Xie  * 53 SW
495e60f8db5SAlex Xie  * 52 T
496e60f8db5SAlex Xie  * 50:48 reserved
497e60f8db5SAlex Xie  * 47:12 4k physical page base address
498e60f8db5SAlex Xie  * 11:7 fragment
499e60f8db5SAlex Xie  * 6 write
500e60f8db5SAlex Xie  * 5 read
501e60f8db5SAlex Xie  * 4 exe
502e60f8db5SAlex Xie  * 3 Z
503e60f8db5SAlex Xie  * 2 snooped
504e60f8db5SAlex Xie  * 1 system
505e60f8db5SAlex Xie  * 0 valid
506e60f8db5SAlex Xie  *
507e60f8db5SAlex Xie  * PDE format on VEGA 10:
508e60f8db5SAlex Xie  * 63:59 block fragment size
509e60f8db5SAlex Xie  * 58:55 reserved
510e60f8db5SAlex Xie  * 54 P
511e60f8db5SAlex Xie  * 53:48 reserved
512e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
513e60f8db5SAlex Xie  * 5:3 reserved
514e60f8db5SAlex Xie  * 2 C
515e60f8db5SAlex Xie  * 1 system
516e60f8db5SAlex Xie  * 0 valid
517e60f8db5SAlex Xie  */
518e60f8db5SAlex Xie 
519e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
520e60f8db5SAlex Xie 						uint32_t flags)
521e60f8db5SAlex Xie 
522e60f8db5SAlex Xie {
523e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
524e60f8db5SAlex Xie 
525e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
526e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
527e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
528e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
529e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
530e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
531e60f8db5SAlex Xie 
532e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
533e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
534e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
535e60f8db5SAlex Xie 		break;
536e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
537e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
538e60f8db5SAlex Xie 		break;
539e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
540e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
541e60f8db5SAlex Xie 		break;
542e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
543e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
544e60f8db5SAlex Xie 		break;
545e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
546e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
547e60f8db5SAlex Xie 		break;
548e60f8db5SAlex Xie 	default:
549e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
550e60f8db5SAlex Xie 		break;
551e60f8db5SAlex Xie 	}
552e60f8db5SAlex Xie 
553e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
554e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
555e60f8db5SAlex Xie 
556e60f8db5SAlex Xie 	return pte_flag;
557e60f8db5SAlex Xie }
558e60f8db5SAlex Xie 
/*
 * Adjust a PDE's address and flags for GMC v9.  VRAM addresses (neither
 * PDE-as-PTE nor system memory) are rebased from the GPU's VRAM window
 * to the MC address space.  With 5-level translation enabled, PDB1
 * entries get a 2MB block-fragment size and PDB0 entries either drop
 * the PDE-as-PTE marker or gain the translate-further bit.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* Resulting address must fit in 48 bits and be 64-byte aligned. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* PDB0 entries are either a huge-page PTE (strip the PDE
		 * marker) or point further down the tree (translate
		 * further). */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
582f75e237cSChristian König 
/* GMC callback table wired into the device in gmc_v9_0_set_gmc_funcs(). */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
590e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
595e60f8db5SAlex Xie 
596e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
597e60f8db5SAlex Xie {
598e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
599e60f8db5SAlex Xie 
600132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
601e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
602e60f8db5SAlex Xie 
603770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
604770d13b1SChristian König 	adev->gmc.shared_aperture_end =
605770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
606bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
607770d13b1SChristian König 	adev->gmc.private_aperture_end =
608770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
609a7ea6548SAlex Deucher 
610e60f8db5SAlex Xie 	return 0;
611e60f8db5SAlex Xie }
612e60f8db5SAlex Xie 
613cd2b5623SAlex Deucher static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
614cd2b5623SAlex Deucher {
615cd2b5623SAlex Deucher 
616cd2b5623SAlex Deucher 	/*
617cd2b5623SAlex Deucher 	 * TODO:
618cd2b5623SAlex Deucher 	 * Currently there is a bug where some memory client outside
619cd2b5623SAlex Deucher 	 * of the driver writes to first 8M of VRAM on S3 resume,
620cd2b5623SAlex Deucher 	 * this overrides GART which by default gets placed in first 8M and
621cd2b5623SAlex Deucher 	 * causes VM_FAULTS once GTT is accessed.
622cd2b5623SAlex Deucher 	 * Keep the stolen memory reservation until the while this is not solved.
623cd2b5623SAlex Deucher 	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
624cd2b5623SAlex Deucher 	 */
625cd2b5623SAlex Deucher 	switch (adev->asic_type) {
62695010ba7SAlex Deucher 	case CHIP_VEGA10:
6276abc0c8fSAlex Deucher 		return true;
6286abc0c8fSAlex Deucher 	case CHIP_RAVEN:
62937910935SFlora Cui 		return (adev->pdev->device == 0x15d8);
6306abc0c8fSAlex Deucher 	case CHIP_VEGA12:
631cd2b5623SAlex Deucher 	case CHIP_VEGA20:
632cd2b5623SAlex Deucher 	default:
6336abc0c8fSAlex Deucher 		return false;
634cd2b5623SAlex Deucher 	}
635cd2b5623SAlex Deucher }
636cd2b5623SAlex Deucher 
637c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
638c713a461SEvan Quan {
639c713a461SEvan Quan 	struct amdgpu_ring *ring;
640c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
641c713a461SEvan Quan 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
642c713a461SEvan Quan 	unsigned i;
643c713a461SEvan Quan 	unsigned vmhub, inv_eng;
644c713a461SEvan Quan 
645c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
646c713a461SEvan Quan 		ring = adev->rings[i];
647c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
648c713a461SEvan Quan 
649c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
650c713a461SEvan Quan 		if (!inv_eng) {
651c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
652c713a461SEvan Quan 				ring->name);
653c713a461SEvan Quan 			return -EINVAL;
654c713a461SEvan Quan 		}
655c713a461SEvan Quan 
656c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
65772464382SChristian König 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
658c713a461SEvan Quan 
659c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
660c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
661c713a461SEvan Quan 	}
662c713a461SEvan Quan 
663c713a461SEvan Quan 	return 0;
664c713a461SEvan Quan }
665c713a461SEvan Quan 
666791c4769Sxinhui pan static int gmc_v9_0_ecc_late_init(void *handle)
667791c4769Sxinhui pan {
668791c4769Sxinhui pan 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
669791c4769Sxinhui pan 	struct ras_common_if **ras_if = &adev->gmc.ras_if;
670791c4769Sxinhui pan 	struct ras_ih_if ih_info = {
671791c4769Sxinhui pan 		.cb = gmc_v9_0_process_ras_data_cb,
672791c4769Sxinhui pan 	};
673791c4769Sxinhui pan 	struct ras_fs_if fs_info = {
674791c4769Sxinhui pan 		.sysfs_name = "umc_err_count",
675791c4769Sxinhui pan 		.debugfs_name = "umc_err_inject",
676791c4769Sxinhui pan 	};
677791c4769Sxinhui pan 	struct ras_common_if ras_block = {
678791c4769Sxinhui pan 		.block = AMDGPU_RAS_BLOCK__UMC,
679791c4769Sxinhui pan 		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
680791c4769Sxinhui pan 		.sub_block_index = 0,
681791c4769Sxinhui pan 		.name = "umc",
682791c4769Sxinhui pan 	};
683791c4769Sxinhui pan 	int r;
684791c4769Sxinhui pan 
685791c4769Sxinhui pan 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
68653d65054Sxinhui pan 		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
687791c4769Sxinhui pan 		return 0;
688791c4769Sxinhui pan 	}
689acbbee01Sxinhui pan 	/* handle resume path. */
690acbbee01Sxinhui pan 	if (*ras_if)
691acbbee01Sxinhui pan 		goto resume;
692791c4769Sxinhui pan 
693791c4769Sxinhui pan 	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
694791c4769Sxinhui pan 	if (!*ras_if)
695791c4769Sxinhui pan 		return -ENOMEM;
696791c4769Sxinhui pan 
697791c4769Sxinhui pan 	**ras_if = ras_block;
698791c4769Sxinhui pan 
69953d65054Sxinhui pan 	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
700791c4769Sxinhui pan 	if (r)
701791c4769Sxinhui pan 		goto feature;
702791c4769Sxinhui pan 
703791c4769Sxinhui pan 	ih_info.head = **ras_if;
704791c4769Sxinhui pan 	fs_info.head = **ras_if;
705791c4769Sxinhui pan 
706791c4769Sxinhui pan 	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
707791c4769Sxinhui pan 	if (r)
708791c4769Sxinhui pan 		goto interrupt;
709791c4769Sxinhui pan 
710791c4769Sxinhui pan 	r = amdgpu_ras_debugfs_create(adev, &fs_info);
711791c4769Sxinhui pan 	if (r)
712791c4769Sxinhui pan 		goto debugfs;
713791c4769Sxinhui pan 
714791c4769Sxinhui pan 	r = amdgpu_ras_sysfs_create(adev, &fs_info);
715791c4769Sxinhui pan 	if (r)
716791c4769Sxinhui pan 		goto sysfs;
717acbbee01Sxinhui pan resume:
718791c4769Sxinhui pan 	r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
719791c4769Sxinhui pan 	if (r)
720791c4769Sxinhui pan 		goto irq;
721791c4769Sxinhui pan 
722791c4769Sxinhui pan 	return 0;
723791c4769Sxinhui pan irq:
724791c4769Sxinhui pan 	amdgpu_ras_sysfs_remove(adev, *ras_if);
725791c4769Sxinhui pan sysfs:
726791c4769Sxinhui pan 	amdgpu_ras_debugfs_remove(adev, *ras_if);
727791c4769Sxinhui pan debugfs:
728791c4769Sxinhui pan 	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
729791c4769Sxinhui pan interrupt:
730791c4769Sxinhui pan 	amdgpu_ras_feature_enable(adev, *ras_if, 0);
731791c4769Sxinhui pan feature:
732791c4769Sxinhui pan 	kfree(*ras_if);
733791c4769Sxinhui pan 	*ras_if = NULL;
734791c4769Sxinhui pan 	return -EINVAL;
735791c4769Sxinhui pan }
736791c4769Sxinhui pan 
737791c4769Sxinhui pan 
738e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
739e60f8db5SAlex Xie {
740e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
741f49ea9f8SHawking Zhang 	bool r;
7424789c463SChristian König 
743cd2b5623SAlex Deucher 	if (!gmc_v9_0_keep_stolen_memory(adev))
744cd2b5623SAlex Deucher 		amdgpu_bo_late_init(adev);
7456f752ec2SAndrey Grodzovsky 
746c713a461SEvan Quan 	r = gmc_v9_0_allocate_vm_inv_eng(adev);
747c713a461SEvan Quan 	if (r)
748c713a461SEvan Quan 		return r;
749f49ea9f8SHawking Zhang 	/* Check if ecc is available */
750f49ea9f8SHawking Zhang 	if (!amdgpu_sriov_vf(adev)) {
751f49ea9f8SHawking Zhang 		switch (adev->asic_type) {
752f49ea9f8SHawking Zhang 		case CHIP_VEGA10:
753f49ea9f8SHawking Zhang 		case CHIP_VEGA20:
754f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
755f49ea9f8SHawking Zhang 			if (!r) {
75602bab923SDavid Panariti 				DRM_INFO("ECC is not present.\n");
757f49ea9f8SHawking Zhang 				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
758e1d1a772SAlex Deucher 					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
75902bab923SDavid Panariti 			} else {
760f49ea9f8SHawking Zhang 				DRM_INFO("ECC is active.\n");
761f49ea9f8SHawking Zhang 			}
762f49ea9f8SHawking Zhang 
763f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
764f49ea9f8SHawking Zhang 			if (!r) {
765f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is not present.\n");
766f49ea9f8SHawking Zhang 			} else {
767f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is active.\n");
768f49ea9f8SHawking Zhang 			}
769f49ea9f8SHawking Zhang 			break;
770f49ea9f8SHawking Zhang 		default:
771f49ea9f8SHawking Zhang 			break;
77202bab923SDavid Panariti 		}
7735ba4fa35SAlex Deucher 	}
77402bab923SDavid Panariti 
775791c4769Sxinhui pan 	r = gmc_v9_0_ecc_late_init(handle);
776791c4769Sxinhui pan 	if (r)
777e60f8db5SAlex Xie 		return r;
778e60f8db5SAlex Xie 
779770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
780e60f8db5SAlex Xie }
781e60f8db5SAlex Xie 
/*
 * gmc_v9_0_vram_gtt_location - place VRAM, GART and AGP in the MC address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller layout to fill in
 *
 * Order matters: VRAM must be located before GART, and AGP last.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;
	/* under SR-IOV the FB location register is not readable; base stays 0 */
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
801e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcord is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* fall back to channels * channel size when ATOM gave nothing */
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* try to grow the BAR so all of VRAM is CPU-visible (dGPU only) */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* on APUs carve-out VRAM is directly CPU-addressable; bypass the BAR */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		/* -1 means "driver default"; otherwise honor the module param */
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
882e60f8db5SAlex Xie 
883e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
884e60f8db5SAlex Xie {
885e60f8db5SAlex Xie 	int r;
886e60f8db5SAlex Xie 
8871123b989SChristian König 	if (adev->gart.bo) {
888e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
889e60f8db5SAlex Xie 		return 0;
890e60f8db5SAlex Xie 	}
891e60f8db5SAlex Xie 	/* Initialize common gart structure */
892e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
893e60f8db5SAlex Xie 	if (r)
894e60f8db5SAlex Xie 		return r;
895e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
896e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
897e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
898e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
899e60f8db5SAlex Xie }
900e60f8db5SAlex Xie 
/*
 * gmc_v9_0_get_vbios_fb_size - size of the pre-OS framebuffer to reserve
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of bytes at the start of VRAM that must be kept for
 * the VBIOS/vga framebuffer, or 0 if it can be reclaimed.
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		/* compute size from the active display viewport */
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			/* DCN register layout */
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			/* DCE register layout */
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
944ebdef28eSAlex Deucher 
/*
 * gmc_v9_0_sw_init - software init for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Sets up the VM address space per ASIC, registers the page-fault and ECC
 * interrupt sources, configures DMA masks, initializes the memory manager,
 * GART and VM manager.  Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* UTCL2 faults are routed to the same vm_fault source */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32-bit DMA when 44-bit is unavailable */
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1064e60f8db5SAlex Xie 
/*
 * gmc_v9_0_sw_fini - software teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Tears down UMC RAS state (if set up), the VM manager, the stolen-memory
 * reservation, the GART table and the memory manager.  Always returns 0.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/*remove fs first*/
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/*remove the IH*/
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* the stolen VGA BO only exists when the reservation was kept */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1097e60f8db5SAlex Xie 
/*
 * gmc_v9_0_init_golden_registers - apply per-ASIC golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the MMHUB/ATHUB golden register sequences for the current ASIC;
 * VEGA12 needs none.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* RAVEN only needs the ATHUB settings */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1122e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Pins the GART table, enables GART on both hubs, configures HDP and the
 * default fault behavior, then flushes the TLBs.  Returns 0 on success or
 * a negative error code.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* RAVEN requires MMHUB powergating setup before GART enable */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* read-modify-write to latch the host path control settings */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	/* value selects whether faults halt the offending client by default */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
1185e60f8db5SAlex Xie 
1186e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1187e60f8db5SAlex Xie {
1188e60f8db5SAlex Xie 	int r;
1189e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1190e60f8db5SAlex Xie 
1191e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1192e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1193e60f8db5SAlex Xie 
1194edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1195edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
11964d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1197edca2d05SAlex Deucher 
1198edca2d05SAlex Deucher 		/* disable VGA render */
11994d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1200edca2d05SAlex Deucher 	}
1201edca2d05SAlex Deucher 
1202e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1203e60f8db5SAlex Xie 
1204e60f8db5SAlex Xie 	return r;
1205e60f8db5SAlex Xie }
1206e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	/* release the pin taken in gmc_v9_0_gart_enable() */
	amdgpu_gart_table_vram_unpin(adev);
}
1220e60f8db5SAlex Xie 
/*
 * gmc_v9_0_hw_fini - hardware teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Disables the ECC and VM-fault interrupts and the GART.  Skipped entirely
 * under SR-IOV where the host owns the GMC registers.  Always returns 0.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1237e60f8db5SAlex Xie 
/*
 * gmc_v9_0_suspend - suspend the GMC v9 IP block
 *
 * Suspend is just the hardware teardown; forward the handle to hw_fini.
 */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1244e60f8db5SAlex Xie 
/*
 * gmc_v9_0_resume - resume the GMC v9 IP block
 *
 * Re-runs hardware init and resets all VMIDs, which must be re-acquired
 * after resume.  Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1258e60f8db5SAlex Xie 
/* IP-block idle check; the memory controller has no busy state to poll. */
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
1264e60f8db5SAlex Xie 
/* IP-block idle wait; trivially succeeds, see gmc_v9_0_is_idle. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
1270e60f8db5SAlex Xie 
/* IP-block soft reset; currently a no-op placeholder. */
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
1276e60f8db5SAlex Xie 
1277e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1278e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1279e60f8db5SAlex Xie {
1280d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1281d5583d4fSHuang Rui 
1282d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1283e60f8db5SAlex Xie }
1284e60f8db5SAlex Xie 
128513052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
128613052be5SHuang Rui {
128713052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
128813052be5SHuang Rui 
128913052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
129013052be5SHuang Rui }
129113052be5SHuang Rui 
/* IP-block powergating hook; GMC v9 has nothing to do here. */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1297e60f8db5SAlex Xie 
/* IP-block lifecycle callbacks registered with the amdgpu core. */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1315e60f8db5SAlex Xie 
/* Version descriptor used when adding this IP block to a SOC. */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1324