xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 994dcfaa)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
535b6b35aaSHawking Zhang #include "umc_v6_1.h"
54e60f8db5SAlex Xie 
5544a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5644a99b65SAndrey Grodzovsky 
57791c4769Sxinhui pan #include "amdgpu_ras.h"
58791c4769Sxinhui pan 
59ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
60ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
61ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
62ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
63ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
64ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
65ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
66ebdef28eSAlex Deucher 
67e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
68e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
69e60f8db5SAlex Xie 
/*
 * Vega10 HDP golden register settings.
 * NOTE(review): entries appear to be (register offset, AND mask, OR value)
 * triples — the consumer of this table is outside this chunk, confirm
 * against amdgpu_device_program_register_sequence().
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
83e60f8db5SAlex Xie 
/*
 * MMHUB 1.0.0 golden register overrides.
 * NOTE(review): the two numeric arguments are presumably (field mask,
 * value) — confirm against the SOC15_REG_GOLDEN_VALUE macro definition.
 */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
895c583018SEvan Quan 
/* ATHUB 1.0.0 golden register overrides (RPB arbitration control). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
955c583018SEvan Quan 
/*
 * UMC MCUMC_CTRL register addresses used to (un)mask ECC interrupts.
 * NOTE(review): the pattern looks like eight UMC instances (bases
 * 0x40000 apart) with four channels each (0x800 apart) — confirm
 * against the UMC 6.x register spec.
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
13002bab923SDavid Panariti 
/*
 * UMC MCUMC_CTRL mask register addresses (ctrl base + 0x20), toggled
 * together with ecc_umc_mcumc_ctrl_addrs in
 * gmc_v9_0_ecc_interrupt_state().
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16502bab923SDavid Panariti 
/*
 * UMC MCUMC status register addresses (ctrl base + 0x2).
 * NOTE(review): not referenced anywhere in this chunk — verify it is
 * used elsewhere in the file before assuming it is dead data.
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
20002bab923SDavid Panariti 
201791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
202791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
203791c4769Sxinhui pan 		unsigned type,
204791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
205791c4769Sxinhui pan {
206791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
207791c4769Sxinhui pan 
208791c4769Sxinhui pan 	bits = 0x7f;
209791c4769Sxinhui pan 
210791c4769Sxinhui pan 	switch (state) {
211791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
212791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
213791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
214791c4769Sxinhui pan 			tmp = RREG32(reg);
215791c4769Sxinhui pan 			tmp &= ~bits;
216791c4769Sxinhui pan 			WREG32(reg, tmp);
217791c4769Sxinhui pan 		}
218791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
219791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
220791c4769Sxinhui pan 			tmp = RREG32(reg);
221791c4769Sxinhui pan 			tmp &= ~bits;
222791c4769Sxinhui pan 			WREG32(reg, tmp);
223791c4769Sxinhui pan 		}
224791c4769Sxinhui pan 		break;
225791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
226791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
227791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
228791c4769Sxinhui pan 			tmp = RREG32(reg);
229791c4769Sxinhui pan 			tmp |= bits;
230791c4769Sxinhui pan 			WREG32(reg, tmp);
231791c4769Sxinhui pan 		}
232791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
233791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
234791c4769Sxinhui pan 			tmp = RREG32(reg);
235791c4769Sxinhui pan 			tmp |= bits;
236791c4769Sxinhui pan 			WREG32(reg, tmp);
237791c4769Sxinhui pan 		}
238791c4769Sxinhui pan 		break;
239791c4769Sxinhui pan 	default:
240791c4769Sxinhui pan 		break;
241791c4769Sxinhui pan 	}
242791c4769Sxinhui pan 
243791c4769Sxinhui pan 	return 0;
244791c4769Sxinhui pan }
245791c4769Sxinhui pan 
/*
 * RAS error-data callback for the GMC block: query UMC error counts
 * and addresses, then reset the GPU if any uncorrectable error was
 * recorded.  Always returns AMDGPU_RAS_SUCCESS.
 */
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct ras_err_data *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* Flag the SRAM ECC condition towards KFD for this device. */
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	if (adev->umc.funcs->query_ras_error_count)
		adev->umc.funcs->query_ras_error_count(adev, err_data);
	/* umc query_ras_error_address is also responsible for clearing
	 * error status
	 */
	if (adev->umc.funcs->query_ras_error_address)
		adev->umc.funcs->query_ras_error_address(adev, err_data);

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count)
		amdgpu_ras_reset_gpu(adev, 0);

	return AMDGPU_RAS_SUCCESS;
}
265791c4769Sxinhui pan 
266791c4769Sxinhui pan static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
267791c4769Sxinhui pan 		struct amdgpu_irq_src *source,
268791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
269791c4769Sxinhui pan {
270145b03ebSTao Zhou 	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
271791c4769Sxinhui pan 	struct ras_dispatch_if ih_data = {
272791c4769Sxinhui pan 		.entry = entry,
273791c4769Sxinhui pan 	};
27414cfde84Sxinhui pan 
27514cfde84Sxinhui pan 	if (!ras_if)
27614cfde84Sxinhui pan 		return 0;
27714cfde84Sxinhui pan 
27814cfde84Sxinhui pan 	ih_data.head = *ras_if;
27914cfde84Sxinhui pan 
280791c4769Sxinhui pan 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
281791c4769Sxinhui pan 	return 0;
282791c4769Sxinhui pan }
283791c4769Sxinhui pan 
284e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
285e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
286e60f8db5SAlex Xie 					unsigned type,
287e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
288e60f8db5SAlex Xie {
289e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
290ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
291e60f8db5SAlex Xie 
29211250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29311250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29411250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29511250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29611250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29711250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29811250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
29911250164SChristian König 
300e60f8db5SAlex Xie 	switch (state) {
301e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
3021daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
303ae6d1416STom St Denis 			hub = &adev->vmhub[j];
304e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
305e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
306e60f8db5SAlex Xie 				tmp = RREG32(reg);
307e60f8db5SAlex Xie 				tmp &= ~bits;
308e60f8db5SAlex Xie 				WREG32(reg, tmp);
309e60f8db5SAlex Xie 			}
310e60f8db5SAlex Xie 		}
311e60f8db5SAlex Xie 		break;
312e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
3131daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
314ae6d1416STom St Denis 			hub = &adev->vmhub[j];
315e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
316e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
317e60f8db5SAlex Xie 				tmp = RREG32(reg);
318e60f8db5SAlex Xie 				tmp |= bits;
319e60f8db5SAlex Xie 				WREG32(reg, tmp);
320e60f8db5SAlex Xie 			}
321e60f8db5SAlex Xie 		}
322e60f8db5SAlex Xie 	default:
323e60f8db5SAlex Xie 		break;
324e60f8db5SAlex Xie 	}
325e60f8db5SAlex Xie 
326e60f8db5SAlex Xie 	return 0;
327e60f8db5SAlex Xie }
328e60f8db5SAlex Xie 
/*
 * VM protection-fault interrupt handler: decode the faulting address,
 * filter duplicate retry faults, clear/report the hub fault status and
 * log the offending process.
 *
 * Returns 1 when a retry fault was filtered (which also prevents it
 * from being forwarded to KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* bit 7 of src_data[1] marks a retryable fault */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	/* Reassemble the fault VA: page number in src_data[0], high
	 * nibble in src_data[1].
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* Map the IH client id to the VM hub that raised the fault. */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		/* Writing bit 0 presumably clears the fault status —
		 * NOTE(review): confirm against the
		 * VM_L2_PROTECTION_FAULT_CNTL register spec.
		 */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			/* Decode the status captured above, field by field. */
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));

		}
	}

	return 0;
}
411e60f8db5SAlex Xie 
/* IRQ source callbacks for VM protection faults. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
416e60f8db5SAlex Xie 
417791c4769Sxinhui pan 
/* IRQ source callbacks for UMC ECC errors. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};
422791c4769Sxinhui pan 
423e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
424e60f8db5SAlex Xie {
425770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
426770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
427791c4769Sxinhui pan 
428791c4769Sxinhui pan 	adev->gmc.ecc_irq.num_types = 1;
429791c4769Sxinhui pan 	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
430e60f8db5SAlex Xie }
431e60f8db5SAlex Xie 
4322a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4332a79d868SYong Zhao 					uint32_t flush_type)
43403f89febSChristian König {
43503f89febSChristian König 	u32 req = 0;
43603f89febSChristian König 
43703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
438c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4392a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
44003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
44103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
44203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
44303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
44403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
44503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
44603f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
44703f89febSChristian König 
44803f89febSChristian König 	return req;
44903f89febSChristian König }
45003f89febSChristian König 
451e60f8db5SAlex Xie /*
452e60f8db5SAlex Xie  * GART
453e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
454e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
455e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
456e60f8db5SAlex Xie  */
457e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub (AMDGPU_GFXHUB_0/AMDGPU_MMHUB_*) to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		/* Let the KIQ perform the req write and ack wait on our
		 * behalf.
		 */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	/* Direct path: serialize invalidation requests across callers. */
	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* Poll (up to usec_timeout iterations) for the per-VMID ack bit. */
	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
515e60f8db5SAlex Xie 
/*
 * Emit a TLB flush on a ring: program the page-directory base for
 * @vmid into the hub's context-0 page-table registers, then emit a
 * reg-write-reg-wait on the invalidation engine's req/ack pair.
 * Returns the pd_addr that was programmed.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	/* flush_type 0 (see gmc_v9_0_get_invalidate_req) */
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/* Each VMID owns a lo/hi register pair, hence the 2 * vmid stride. */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* Request the invalidation and wait for the per-VMID ack bit. */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
5369096d6e5SChristian König 
537c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
538c633c00bSChristian König 					unsigned pasid)
539c633c00bSChristian König {
540c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
541c633c00bSChristian König 	uint32_t reg;
542c633c00bSChristian König 
543f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
544f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
545f2d66571SLe Ma 		return;
546f2d66571SLe Ma 
547a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
548c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
549c633c00bSChristian König 	else
550c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
551c633c00bSChristian König 
552c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
553c633c00bSChristian König }
554c633c00bSChristian König 
555e60f8db5SAlex Xie /*
556e60f8db5SAlex Xie  * PTE format on VEGA 10:
557e60f8db5SAlex Xie  * 63:59 reserved
558e60f8db5SAlex Xie  * 58:57 mtype
559e60f8db5SAlex Xie  * 56 F
560e60f8db5SAlex Xie  * 55 L
561e60f8db5SAlex Xie  * 54 P
562e60f8db5SAlex Xie  * 53 SW
563e60f8db5SAlex Xie  * 52 T
564e60f8db5SAlex Xie  * 50:48 reserved
565e60f8db5SAlex Xie  * 47:12 4k physical page base address
566e60f8db5SAlex Xie  * 11:7 fragment
567e60f8db5SAlex Xie  * 6 write
568e60f8db5SAlex Xie  * 5 read
569e60f8db5SAlex Xie  * 4 exe
570e60f8db5SAlex Xie  * 3 Z
571e60f8db5SAlex Xie  * 2 snooped
572e60f8db5SAlex Xie  * 1 system
573e60f8db5SAlex Xie  * 0 valid
574e60f8db5SAlex Xie  *
575e60f8db5SAlex Xie  * PDE format on VEGA 10:
576e60f8db5SAlex Xie  * 63:59 block fragment size
577e60f8db5SAlex Xie  * 58:55 reserved
578e60f8db5SAlex Xie  * 54 P
579e60f8db5SAlex Xie  * 53:48 reserved
580e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
581e60f8db5SAlex Xie  * 5:3 reserved
582e60f8db5SAlex Xie  * 2 C
583e60f8db5SAlex Xie  * 1 system
584e60f8db5SAlex Xie  * 0 valid
585e60f8db5SAlex Xie  */
586e60f8db5SAlex Xie 
587e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
588e60f8db5SAlex Xie 						uint32_t flags)
589e60f8db5SAlex Xie 
590e60f8db5SAlex Xie {
591e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
592e60f8db5SAlex Xie 
593e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
594e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
595e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
596e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
597e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
598e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
599e60f8db5SAlex Xie 
600e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
601e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
6027596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
603e60f8db5SAlex Xie 		break;
604e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
6057596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
606e60f8db5SAlex Xie 		break;
607e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
6087596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
609e60f8db5SAlex Xie 		break;
610e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
6117596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
612e60f8db5SAlex Xie 		break;
613e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
6147596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
615e60f8db5SAlex Xie 		break;
616e60f8db5SAlex Xie 	default:
6177596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
618e60f8db5SAlex Xie 		break;
619e60f8db5SAlex Xie 	}
620e60f8db5SAlex Xie 
621e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
622e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
623e60f8db5SAlex Xie 
624e60f8db5SAlex Xie 	return pte_flag;
625e60f8db5SAlex Xie }
626e60f8db5SAlex Xie 
6273de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
6283de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
629f75e237cSChristian König {
630bbc9fb10SChristian König 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
6313de676d8SChristian König 		*addr = adev->vm_manager.vram_base_offset + *addr -
632770d13b1SChristian König 			adev->gmc.vram_start;
6333de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
6346a42fd6fSChristian König 
635770d13b1SChristian König 	if (!adev->gmc.translate_further)
6366a42fd6fSChristian König 		return;
6376a42fd6fSChristian König 
6386a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
6396a42fd6fSChristian König 		/* Set the block fragment size */
6406a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
6416a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
6426a42fd6fSChristian König 
6436a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
6446a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
6456a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
6466a42fd6fSChristian König 		else
6476a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
6486a42fd6fSChristian König 	}
649f75e237cSChristian König }
650f75e237cSChristian König 
/* GMC callback table shared by all GMC v9 based ASICs */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
658e60f8db5SAlex Xie 
/* Install the common GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
663e60f8db5SAlex Xie 
6645b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
6655b6b35aaSHawking Zhang {
6665b6b35aaSHawking Zhang 	switch (adev->asic_type) {
6675b6b35aaSHawking Zhang 	case CHIP_VEGA20:
6683aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
6693aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
6703aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
6713aacf4eaSTao Zhou 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
6723aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
673045c0216STao Zhou 		adev->umc.funcs = &umc_v6_1_funcs;
6745b6b35aaSHawking Zhang 		break;
6755b6b35aaSHawking Zhang 	default:
6765b6b35aaSHawking Zhang 		break;
6775b6b35aaSHawking Zhang 	}
6785b6b35aaSHawking Zhang }
6795b6b35aaSHawking Zhang 
6803d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
6813d093da0STao Zhou {
6823d093da0STao Zhou 	switch (adev->asic_type) {
6833d093da0STao Zhou 	case CHIP_VEGA20:
6843d093da0STao Zhou 		adev->mmhub_funcs = &mmhub_v1_0_funcs;
6853d093da0STao Zhou 		break;
6863d093da0STao Zhou 	default:
6873d093da0STao Zhou 		break;
6883d093da0STao Zhou 	}
6893d093da0STao Zhou }
6903d093da0STao Zhou 
691e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
692e60f8db5SAlex Xie {
693e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
694e60f8db5SAlex Xie 
695132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
696e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
6975b6b35aaSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
6983d093da0STao Zhou 	gmc_v9_0_set_mmhub_funcs(adev);
699e60f8db5SAlex Xie 
700770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
701770d13b1SChristian König 	adev->gmc.shared_aperture_end =
702770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
703bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
704770d13b1SChristian König 	adev->gmc.private_aperture_end =
705770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
706a7ea6548SAlex Deucher 
707e60f8db5SAlex Xie 	return 0;
708e60f8db5SAlex Xie }
709e60f8db5SAlex Xie 
/*
 * Returns true when the stolen (pre-OS) VRAM reservation must be kept
 * for the whole driver lifetime instead of being released after init.
 */
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this issue is resolved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
734cd2b5623SAlex Deucher 
735c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
736c713a461SEvan Quan {
737c713a461SEvan Quan 	struct amdgpu_ring *ring;
738c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
739c8a6e2a3SLe Ma 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
740c8a6e2a3SLe Ma 		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
741c713a461SEvan Quan 	unsigned i;
742c713a461SEvan Quan 	unsigned vmhub, inv_eng;
743c713a461SEvan Quan 
744c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
745c713a461SEvan Quan 		ring = adev->rings[i];
746c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
747c713a461SEvan Quan 
748c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
749c713a461SEvan Quan 		if (!inv_eng) {
750c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
751c713a461SEvan Quan 				ring->name);
752c713a461SEvan Quan 			return -EINVAL;
753c713a461SEvan Quan 		}
754c713a461SEvan Quan 
755c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
75672464382SChristian König 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
757c713a461SEvan Quan 
758c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
759c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
760c713a461SEvan Quan 	}
761c713a461SEvan Quan 
762c713a461SEvan Quan 	return 0;
763c713a461SEvan Quan }
764c713a461SEvan Quan 
/*
 * Late init for one RAS capable block (UMC or MMHUB): enable the RAS
 * feature, register the IH handler (UMC only), and create the
 * debugfs/sysfs nodes.  Also handles re-enabling RAS on resume when
 * the per-block state already exists.
 *
 * Returns 0 on success (or when RAS is unsupported for the block);
 * on failure everything set up so far is unwound via the goto chain
 * and a negative errno is returned.
 */
static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
			struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = NULL;
	struct ras_ih_if ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};
	int r;

	/* pick the per-block state slot in adev->gmc; only UMC and MMHUB
	 * are valid here, anything else is a programming error */
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
		ras_if = &adev->gmc.umc_ras_if;
	else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
		ras_if = &adev->gmc.mmhub_ras_if;
	else
		BUG();

	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	/* handle resume path. */
	if (*ras_if) {
		/* resend ras TA enable cmd during resume.
		 * prepare to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset. will run again. */
				amdgpu_ras_request_reset_on_boot(adev,
						ras_block->block);
				return 0;
			}
			/* fail to enable ras, cleanup all. */
			goto irq;
		}
		/* enable successfully. continue. */
		goto resume;
	}

	/* first-time init: allocate and populate the per-block state */
	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = *ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			/* -EAGAIN means a reset was requested; not an error */
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info->head = **ras_if;

	/* only the UMC block delivers RAS errors through an IH source */
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
		r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
		if (r)
			goto interrupt;
	}

	amdgpu_ras_debugfs_create(adev, fs_info);

	r = amdgpu_ras_sysfs_create(adev, fs_info);
	if (r)
		goto sysfs;
resume:
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto irq;
	}

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}
859791c4769Sxinhui pan 
860145b03ebSTao Zhou static int gmc_v9_0_ecc_late_init(void *handle)
861145b03ebSTao Zhou {
862145b03ebSTao Zhou 	int r;
863145b03ebSTao Zhou 
864145b03ebSTao Zhou 	struct ras_fs_if umc_fs_info = {
865145b03ebSTao Zhou 		.sysfs_name = "umc_err_count",
866145b03ebSTao Zhou 		.debugfs_name = "umc_err_inject",
867145b03ebSTao Zhou 	};
868145b03ebSTao Zhou 	struct ras_common_if umc_ras_block = {
869145b03ebSTao Zhou 		.block = AMDGPU_RAS_BLOCK__UMC,
870145b03ebSTao Zhou 		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
871145b03ebSTao Zhou 		.sub_block_index = 0,
872145b03ebSTao Zhou 		.name = "umc",
873145b03ebSTao Zhou 	};
874145b03ebSTao Zhou 	struct ras_fs_if mmhub_fs_info = {
875145b03ebSTao Zhou 		.sysfs_name = "mmhub_err_count",
876145b03ebSTao Zhou 		.debugfs_name = "mmhub_err_inject",
877145b03ebSTao Zhou 	};
878145b03ebSTao Zhou 	struct ras_common_if mmhub_ras_block = {
879145b03ebSTao Zhou 		.block = AMDGPU_RAS_BLOCK__MMHUB,
880145b03ebSTao Zhou 		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
881145b03ebSTao Zhou 		.sub_block_index = 0,
882145b03ebSTao Zhou 		.name = "mmhub",
883145b03ebSTao Zhou 	};
884145b03ebSTao Zhou 
885145b03ebSTao Zhou 	r = gmc_v9_0_ecc_ras_block_late_init(handle,
886145b03ebSTao Zhou 			&umc_fs_info, &umc_ras_block);
887145b03ebSTao Zhou 	if (r)
888145b03ebSTao Zhou 		return r;
889145b03ebSTao Zhou 
890145b03ebSTao Zhou 	r = gmc_v9_0_ecc_ras_block_late_init(handle,
891145b03ebSTao Zhou 			&mmhub_fs_info, &mmhub_ras_block);
892145b03ebSTao Zhou 	return r;
893145b03ebSTao Zhou }
894791c4769Sxinhui pan 
895e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
896e60f8db5SAlex Xie {
897e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
898f49ea9f8SHawking Zhang 	bool r;
8994789c463SChristian König 
900cd2b5623SAlex Deucher 	if (!gmc_v9_0_keep_stolen_memory(adev))
901cd2b5623SAlex Deucher 		amdgpu_bo_late_init(adev);
9026f752ec2SAndrey Grodzovsky 
903c713a461SEvan Quan 	r = gmc_v9_0_allocate_vm_inv_eng(adev);
904c713a461SEvan Quan 	if (r)
905c713a461SEvan Quan 		return r;
906f49ea9f8SHawking Zhang 	/* Check if ecc is available */
907f49ea9f8SHawking Zhang 	if (!amdgpu_sriov_vf(adev)) {
908f49ea9f8SHawking Zhang 		switch (adev->asic_type) {
909f49ea9f8SHawking Zhang 		case CHIP_VEGA10:
910f49ea9f8SHawking Zhang 		case CHIP_VEGA20:
911f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
912f49ea9f8SHawking Zhang 			if (!r) {
91302bab923SDavid Panariti 				DRM_INFO("ECC is not present.\n");
914f49ea9f8SHawking Zhang 				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
915e1d1a772SAlex Deucher 					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
91602bab923SDavid Panariti 			} else {
917f49ea9f8SHawking Zhang 				DRM_INFO("ECC is active.\n");
918f49ea9f8SHawking Zhang 			}
919f49ea9f8SHawking Zhang 
920f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
921f49ea9f8SHawking Zhang 			if (!r) {
922f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is not present.\n");
923f49ea9f8SHawking Zhang 			} else {
924f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is active.\n");
925f49ea9f8SHawking Zhang 			}
926f49ea9f8SHawking Zhang 			break;
927f49ea9f8SHawking Zhang 		default:
928f49ea9f8SHawking Zhang 			break;
92902bab923SDavid Panariti 		}
9305ba4fa35SAlex Deucher 	}
93102bab923SDavid Panariti 
932791c4769Sxinhui pan 	r = gmc_v9_0_ecc_late_init(handle);
933791c4769Sxinhui pan 	if (r)
934e60f8db5SAlex Xie 		return r;
935e60f8db5SAlex Xie 
936770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
937e60f8db5SAlex Xie }
938e60f8db5SAlex Xie 
/*
 * gmc_v9_0_vram_gtt_location - place the VRAM, GART and AGP apertures
 * in the GPU's MC address space, starting from the FB base reported by
 * the mmhub (Arcturus uses the 9.4 mmhub; under SR-IOV on the v1.0
 * path the base stays 0).
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
961e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success, negative errno if resizing the FB BAR fails.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcoding is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	/* fall back to deriving the width from channel count x channel size */
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* On x86-64 APUs, use the MC FB offset as the aperture base and
	 * expose all of VRAM through it instead of relying on the BAR.
	 */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size (module parameter override, -1 = auto) */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
1044e60f8db5SAlex Xie 
1045e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1046e60f8db5SAlex Xie {
1047e60f8db5SAlex Xie 	int r;
1048e60f8db5SAlex Xie 
10491123b989SChristian König 	if (adev->gart.bo) {
1050e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
1051e60f8db5SAlex Xie 		return 0;
1052e60f8db5SAlex Xie 	}
1053e60f8db5SAlex Xie 	/* Initialize common gart structure */
1054e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
1055e60f8db5SAlex Xie 	if (r)
1056e60f8db5SAlex Xie 		return r;
1057e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
10587596ab68SHawking Zhang 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1059e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
1060e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
1061e60f8db5SAlex Xie }
1062e60f8db5SAlex Xie 
1063ebdef28eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1064ebdef28eSAlex Deucher {
1065bfa3a9bbSHawking Zhang 	u32 d1vga_control;
1066ebdef28eSAlex Deucher 	unsigned size;
1067ebdef28eSAlex Deucher 
10686f752ec2SAndrey Grodzovsky 	/*
10696f752ec2SAndrey Grodzovsky 	 * TODO Remove once GART corruption is resolved
10706f752ec2SAndrey Grodzovsky 	 * Check related code in gmc_v9_0_sw_fini
10716f752ec2SAndrey Grodzovsky 	 * */
1072cd2b5623SAlex Deucher 	if (gmc_v9_0_keep_stolen_memory(adev))
1073cd2b5623SAlex Deucher 		return 9 * 1024 * 1024;
10746f752ec2SAndrey Grodzovsky 
1075bfa3a9bbSHawking Zhang 	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1076ebdef28eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1077ebdef28eSAlex Deucher 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
1078ebdef28eSAlex Deucher 	} else {
1079ebdef28eSAlex Deucher 		u32 viewport;
1080ebdef28eSAlex Deucher 
1081ebdef28eSAlex Deucher 		switch (adev->asic_type) {
1082ebdef28eSAlex Deucher 		case CHIP_RAVEN:
10838787ee01SHuang Rui 		case CHIP_RENOIR:
1084ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1085ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
1086ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1087ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport,
1088ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1089ebdef28eSAlex Deucher 				4);
1090ebdef28eSAlex Deucher 			break;
1091ebdef28eSAlex Deucher 		case CHIP_VEGA10:
1092ebdef28eSAlex Deucher 		case CHIP_VEGA12:
1093cd2b5623SAlex Deucher 		case CHIP_VEGA20:
1094ebdef28eSAlex Deucher 		default:
1095ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1096ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1097ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1098ebdef28eSAlex Deucher 				4);
1099ebdef28eSAlex Deucher 			break;
1100ebdef28eSAlex Deucher 		}
1101ebdef28eSAlex Deucher 	}
1102ebdef28eSAlex Deucher 	/* return 0 if the pre-OS buffer uses up most of vram */
1103ebdef28eSAlex Deucher 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
1104ebdef28eSAlex Deucher 		return 0;
11056f752ec2SAndrey Grodzovsky 
1106ebdef28eSAlex Deucher 	return size;
1107ebdef28eSAlex Deucher }
1108ebdef28eSAlex Deucher 
/*
 * gmc_v9_0_sw_init - software init for the GMC v9 IP block
 *
 * Initializes the gfx/mm hubs, picks the VM address space layout per
 * ASIC, registers the page-fault and ECC interrupt sources, configures
 * the 44 bit DMA mask, queries xGMI, sizes and places VRAM/GART,
 * allocates the GART table and starts the VM manager.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus routes faults from a second VMC client as well */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1237e60f8db5SAlex Xie 
/*
 * gmc_v9_0_sw_fini - tear down what gmc_v9_0_sw_init set up: per-block
 * RAS state, the VM manager, the stolen VGA reservation (when kept),
 * the GART table and the buffer manager.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	/* UMC RAS teardown: fs nodes, IH handler, feature, then state */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	/* MMHUB RAS teardown: no IH handler was registered for it */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* release the stolen VGA memory that was held for the whole
	 * driver lifetime on affected ASICs */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1282e60f8db5SAlex Xie 
/*
 * gmc_v9_0_init_golden_registers - program per-ASIC "golden" register
 * settings for the MMHUB and ATHUB blocks.
 *
 * @adev: amdgpu_device pointer
 *
 * VEGA10 under SR-IOV skips programming entirely (the host owns these
 * registers in virtualized mode); bare-metal VEGA10 intentionally falls
 * through to the VEGA20 sequence.  VEGA12 needs no golden settings.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		/* RAVEN only needs the ATHUB sequence, not the MMHUB one. */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1311e60f8db5SAlex Xie 
1312e60f8db5SAlex Xie /**
1313e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1314e60f8db5SAlex Xie  *
1315e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1316e60f8db5SAlex Xie  */
1317e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1318e60f8db5SAlex Xie {
13193ff98548SOak Zeng 	int r, i;
1320e60f8db5SAlex Xie 	bool value;
1321e60f8db5SAlex Xie 	u32 tmp;
1322e60f8db5SAlex Xie 
13239c3f2b54SAlex Deucher 	amdgpu_device_program_register_sequence(adev,
1324e60f8db5SAlex Xie 						golden_settings_vega10_hdp,
1325c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_vega10_hdp));
1326e60f8db5SAlex Xie 
13271123b989SChristian König 	if (adev->gart.bo == NULL) {
1328e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1329e60f8db5SAlex Xie 		return -EINVAL;
1330e60f8db5SAlex Xie 	}
1331ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1332ce1b1b66SMonk Liu 	if (r)
1333ce1b1b66SMonk Liu 		return r;
1334e60f8db5SAlex Xie 
13352fcd43ceSHawking Zhang 	switch (adev->asic_type) {
13362fcd43ceSHawking Zhang 	case CHIP_RAVEN:
13378787ee01SHuang Rui 		/* TODO for renoir */
1338f8386b35SHawking Zhang 		mmhub_v1_0_update_power_gating(adev, true);
13392fcd43ceSHawking Zhang 		break;
13402fcd43ceSHawking Zhang 	default:
13412fcd43ceSHawking Zhang 		break;
13422fcd43ceSHawking Zhang 	}
13432fcd43ceSHawking Zhang 
1344e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
1345e60f8db5SAlex Xie 	if (r)
1346e60f8db5SAlex Xie 		return r;
1347e60f8db5SAlex Xie 
134851cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
134951cce480SLe Ma 		r = mmhub_v9_4_gart_enable(adev);
135051cce480SLe Ma 	else
1351e60f8db5SAlex Xie 		r = mmhub_v1_0_gart_enable(adev);
1352e60f8db5SAlex Xie 	if (r)
1353e60f8db5SAlex Xie 		return r;
1354e60f8db5SAlex Xie 
1355846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1356e60f8db5SAlex Xie 
1357b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1358b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1359e60f8db5SAlex Xie 
1360fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1361fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1362fe2b5323STiecheng Zhou 
13631d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP.*/
136469882565SChristian König 	adev->nbio_funcs->hdp_flush(adev, NULL);
13651d4e0a8cSMonk Liu 
1366e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1367e60f8db5SAlex Xie 		value = false;
1368e60f8db5SAlex Xie 	else
1369e60f8db5SAlex Xie 		value = true;
1370e60f8db5SAlex Xie 
1371e60f8db5SAlex Xie 	gfxhub_v1_0_set_fault_enable_default(adev, value);
137251cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
137351cce480SLe Ma 		mmhub_v9_4_set_fault_enable_default(adev, value);
137451cce480SLe Ma 	else
1375e60f8db5SAlex Xie 		mmhub_v1_0_set_fault_enable_default(adev, value);
13763ff98548SOak Zeng 
13773ff98548SOak Zeng 	for (i = 0; i < adev->num_vmhubs; ++i)
13783ff98548SOak Zeng 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1379e60f8db5SAlex Xie 
1380e60f8db5SAlex Xie 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1381770d13b1SChristian König 		 (unsigned)(adev->gmc.gart_size >> 20),
13824e830fb1SChristian König 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1383e60f8db5SAlex Xie 	adev->gart.ready = true;
1384e60f8db5SAlex Xie 	return 0;
1385e60f8db5SAlex Xie }
1386e60f8db5SAlex Xie 
1387e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1388e60f8db5SAlex Xie {
1389e60f8db5SAlex Xie 	int r;
1390e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1391e60f8db5SAlex Xie 
1392e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1393e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1394e60f8db5SAlex Xie 
1395edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1396edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
13974d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1398edca2d05SAlex Deucher 
1399edca2d05SAlex Deucher 		/* disable VGA render */
14004d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1401edca2d05SAlex Deucher 	}
1402edca2d05SAlex Deucher 
1403e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1404e60f8db5SAlex Xie 
1405e60f8db5SAlex Xie 	return r;
1406e60f8db5SAlex Xie }
1407e60f8db5SAlex Xie 
1408e60f8db5SAlex Xie /**
1409e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1410e60f8db5SAlex Xie  *
1411e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1412e60f8db5SAlex Xie  *
1413e60f8db5SAlex Xie  * This disables all VM page table.
1414e60f8db5SAlex Xie  */
1415e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1416e60f8db5SAlex Xie {
1417e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
141851cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
141951cce480SLe Ma 		mmhub_v9_4_gart_disable(adev);
142051cce480SLe Ma 	else
1421e60f8db5SAlex Xie 		mmhub_v1_0_gart_disable(adev);
1422ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1423e60f8db5SAlex Xie }
1424e60f8db5SAlex Xie 
/*
 * gmc_v9_0_hw_fini - hardware teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Releases the ECC and VM-fault interrupt sources and disables the
 * GART.  Skipped entirely under SR-IOV, where the host owns the GMC
 * registers.  Always returns 0.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* drop the irq references taken during init before disabling GART */
	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1441e60f8db5SAlex Xie 
/*
 * gmc_v9_0_suspend - suspend is just hardware teardown for GMC v9.
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 */
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}
1448e60f8db5SAlex Xie 
/*
 * gmc_v9_0_resume - resume the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Re-runs hw_init and, on success, invalidates every VMID so that all
 * page-table mappings are rebuilt after the resume.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1462e60f8db5SAlex Xie 
1463e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1464e60f8db5SAlex Xie {
1465e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1466e60f8db5SAlex Xie 	return true;
1467e60f8db5SAlex Xie }
1468e60f8db5SAlex Xie 
/* Nothing to wait for: the MC is always ready on GMC v9. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	return 0;
}
1474e60f8db5SAlex Xie 
/* Soft reset is a no-op stub (XXX: kept for emulation). */
static int gmc_v9_0_soft_reset(void *handle)
{
	return 0;
}
1480e60f8db5SAlex Xie 
1481e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1482e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1483e60f8db5SAlex Xie {
1484d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1485d5583d4fSHuang Rui 
148651cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1487cb15e804SLe Ma 		mmhub_v9_4_set_clockgating(adev, state);
1488cb15e804SLe Ma 	else
1489bee7b51aSLe Ma 		mmhub_v1_0_set_clockgating(adev, state);
1490bee7b51aSLe Ma 
1491bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1492bee7b51aSLe Ma 
1493bee7b51aSLe Ma 	return 0;
1494e60f8db5SAlex Xie }
1495e60f8db5SAlex Xie 
149613052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
149713052be5SHuang Rui {
149813052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
149913052be5SHuang Rui 
150051cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1501cb15e804SLe Ma 		mmhub_v9_4_get_clockgating(adev, flags);
1502cb15e804SLe Ma 	else
150313052be5SHuang Rui 		mmhub_v1_0_get_clockgating(adev, flags);
1504bee7b51aSLe Ma 
1505bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
150613052be5SHuang Rui }
150713052be5SHuang Rui 
1508e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1509e60f8db5SAlex Xie 					enum amd_powergating_state state)
1510e60f8db5SAlex Xie {
1511e60f8db5SAlex Xie 	return 0;
1512e60f8db5SAlex Xie }
1513e60f8db5SAlex Xie 
/* IP-block callback table for GMC v9; referenced by gmc_v9_0_ip_block
 * below.  All handlers take the amdgpu_device as an opaque handle. */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1531e60f8db5SAlex Xie 
/* Version descriptor registered with the amdgpu IP-block framework for
 * the GMC v9.0 memory controller. */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1540