xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision c5b6e585)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
535b6b35aaSHawking Zhang #include "umc_v6_1.h"
54e60f8db5SAlex Xie 
5544a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5644a99b65SAndrey Grodzovsky 
57791c4769Sxinhui pan #include "amdgpu_ras.h"
58791c4769Sxinhui pan 
/* add these here since we already include dce12 headers and these are for DCN */
/* HUBP0 primary viewport dimension register: per the masks/shifts below,
 * viewport width lives in bits 13:0 and height in bits 29:16.
 */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8
69e60f8db5SAlex Xie 
/* Vega10 HDP golden register settings.
 * NOTE(review): layout appears to be {reg offset, and_mask, or_value}
 * triples, matching the usual golden-settings convention — confirm
 * against the code that consumes this table.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
83e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register settings (applied via the soc15
 * golden-value helpers; each entry is reg, and_mask, or_value).
 */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
895c583018SEvan Quan 
/* ATHUB 1.0.0 golden register settings (RPB arbiter tuning). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
955c583018SEvan Quan 
/* Raw addresses of the UMC MCUMC_CTRL registers: eight base offsets
 * (one per UMC instance) with four per-channel instances each at
 * stride 0x800 — 32 registers total.  Toggled as a set by
 * gmc_v9_0_ecc_interrupt_state().
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
13002bab923SDavid Panariti 
/* Companion MCUMC_CTRL_MASK registers: same 8-instance x 4-channel
 * layout as ecc_umc_mcumc_ctrl_addrs, at register offset +0x20.
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16502bab923SDavid Panariti 
/* MCUMC status registers, same layout again at ctrl offset +0x2.
 * NOTE(review): this table is not referenced in the visible chunk;
 * presumably consumed by the ECC query/handling path elsewhere.
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
20002bab923SDavid Panariti 
201791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
202791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
203791c4769Sxinhui pan 		unsigned type,
204791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
205791c4769Sxinhui pan {
206791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
207791c4769Sxinhui pan 
208791c4769Sxinhui pan 	bits = 0x7f;
209791c4769Sxinhui pan 
210791c4769Sxinhui pan 	switch (state) {
211791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
212791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
213791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
214791c4769Sxinhui pan 			tmp = RREG32(reg);
215791c4769Sxinhui pan 			tmp &= ~bits;
216791c4769Sxinhui pan 			WREG32(reg, tmp);
217791c4769Sxinhui pan 		}
218791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
219791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
220791c4769Sxinhui pan 			tmp = RREG32(reg);
221791c4769Sxinhui pan 			tmp &= ~bits;
222791c4769Sxinhui pan 			WREG32(reg, tmp);
223791c4769Sxinhui pan 		}
224791c4769Sxinhui pan 		break;
225791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
226791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
227791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
228791c4769Sxinhui pan 			tmp = RREG32(reg);
229791c4769Sxinhui pan 			tmp |= bits;
230791c4769Sxinhui pan 			WREG32(reg, tmp);
231791c4769Sxinhui pan 		}
232791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
233791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
234791c4769Sxinhui pan 			tmp = RREG32(reg);
235791c4769Sxinhui pan 			tmp |= bits;
236791c4769Sxinhui pan 			WREG32(reg, tmp);
237791c4769Sxinhui pan 		}
238791c4769Sxinhui pan 		break;
239791c4769Sxinhui pan 	default:
240791c4769Sxinhui pan 		break;
241791c4769Sxinhui pan 	}
242791c4769Sxinhui pan 
243791c4769Sxinhui pan 	return 0;
244791c4769Sxinhui pan }
245791c4769Sxinhui pan 
246791c4769Sxinhui pan static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
24781e02619STao Zhou 		struct ras_err_data *err_data,
248791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
249791c4769Sxinhui pan {
2507c6e68c7SAndrey Grodzovsky 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
2519b54d201SEric Huang 		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
252045c0216STao Zhou 		if (adev->umc.funcs->query_ras_error_count)
25381e02619STao Zhou 			adev->umc.funcs->query_ras_error_count(adev, err_data);
25413b7c46cSTao Zhou 		/* umc query_ras_error_address is also responsible for clearing
25513b7c46cSTao Zhou 		 * error status
25613b7c46cSTao Zhou 		 */
25713b7c46cSTao Zhou 		if (adev->umc.funcs->query_ras_error_address)
25813b7c46cSTao Zhou 			adev->umc.funcs->query_ras_error_address(adev, err_data);
25991ba68f8STao Zhou 
26091ba68f8STao Zhou 		/* only uncorrectable error needs gpu reset */
26191ba68f8STao Zhou 		if (err_data->ue_count)
262791c4769Sxinhui pan 			amdgpu_ras_reset_gpu(adev, 0);
2637c6e68c7SAndrey Grodzovsky 	}
26491ba68f8STao Zhou 
265bd2280daSTao Zhou 	return AMDGPU_RAS_SUCCESS;
266791c4769Sxinhui pan }
267791c4769Sxinhui pan 
268791c4769Sxinhui pan static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
269791c4769Sxinhui pan 		struct amdgpu_irq_src *source,
270791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
271791c4769Sxinhui pan {
272145b03ebSTao Zhou 	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
273791c4769Sxinhui pan 	struct ras_dispatch_if ih_data = {
274791c4769Sxinhui pan 		.entry = entry,
275791c4769Sxinhui pan 	};
27614cfde84Sxinhui pan 
27714cfde84Sxinhui pan 	if (!ras_if)
27814cfde84Sxinhui pan 		return 0;
27914cfde84Sxinhui pan 
28014cfde84Sxinhui pan 	ih_data.head = *ras_if;
28114cfde84Sxinhui pan 
282791c4769Sxinhui pan 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
283791c4769Sxinhui pan 	return 0;
284791c4769Sxinhui pan }
285791c4769Sxinhui pan 
286e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
287e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
288e60f8db5SAlex Xie 					unsigned type,
289e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
290e60f8db5SAlex Xie {
291e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
292ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
293e60f8db5SAlex Xie 
29411250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29511250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29611250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29711250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29811250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
29911250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
30011250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
30111250164SChristian König 
302e60f8db5SAlex Xie 	switch (state) {
303e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
3041daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
305ae6d1416STom St Denis 			hub = &adev->vmhub[j];
306e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
307e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
308e60f8db5SAlex Xie 				tmp = RREG32(reg);
309e60f8db5SAlex Xie 				tmp &= ~bits;
310e60f8db5SAlex Xie 				WREG32(reg, tmp);
311e60f8db5SAlex Xie 			}
312e60f8db5SAlex Xie 		}
313e60f8db5SAlex Xie 		break;
314e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
3151daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
316ae6d1416STom St Denis 			hub = &adev->vmhub[j];
317e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
318e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
319e60f8db5SAlex Xie 				tmp = RREG32(reg);
320e60f8db5SAlex Xie 				tmp |= bits;
321e60f8db5SAlex Xie 				WREG32(reg, tmp);
322e60f8db5SAlex Xie 			}
323e60f8db5SAlex Xie 		}
324e60f8db5SAlex Xie 	default:
325e60f8db5SAlex Xie 		break;
326e60f8db5SAlex Xie 	}
327e60f8db5SAlex Xie 
328e60f8db5SAlex Xie 	return 0;
329e60f8db5SAlex Xie }
330e60f8db5SAlex Xie 
/**
 * gmc_v9_0_process_interrupt - handle a VM protection-fault interrupt
 *
 * @adev: amdgpu device pointer
 * @source: interrupt source (unused)
 * @entry: the interrupt vector entry describing the fault
 *
 * Decodes the faulting address and hub from the IV entry, reads and
 * clears the L2 protection fault status (bare-metal only), and emits a
 * rate-limited diagnostic dump.  Returns 1 for filtered duplicate retry
 * faults (which also keeps them away from KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* bit 7 of src_data[1] marks this as a retry fault */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	/* Fault address: page number in src_data[0], top nibble of the
	 * 48-bit address in the low bits of src_data[1].
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Drop duplicate retry faults for the same address/pasid */
	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* Map the IH client id to the hub that raised the fault */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		/* Read then clear (bit 0) the fault status */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* Rate-limited diagnostic dump including the offending task */
	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			/* Decode the individual status fields for the log */
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));

		}
	}

	return 0;
}
413e60f8db5SAlex Xie 
/* VM protection-fault interrupt source callbacks */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
418e60f8db5SAlex Xie 
419791c4769Sxinhui pan 
/* UMC ECC interrupt source callbacks */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};
424791c4769Sxinhui pan 
425e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
426e60f8db5SAlex Xie {
427770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
428770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
429791c4769Sxinhui pan 
430791c4769Sxinhui pan 	adev->gmc.ecc_irq.num_types = 1;
431791c4769Sxinhui pan 	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
432e60f8db5SAlex Xie }
433e60f8db5SAlex Xie 
4342a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4352a79d868SYong Zhao 					uint32_t flush_type)
43603f89febSChristian König {
43703f89febSChristian König 	u32 req = 0;
43803f89febSChristian König 
43903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
440c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4412a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
44203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
44303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
44403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
44503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
44603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
44703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
44803f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
44903f89febSChristian König 
45003f89febSChristian König 	return req;
45103f89febSChristian König }
45203f89febSChristian König 
453e60f8db5SAlex Xie /*
454e60f8db5SAlex Xie  * GART
455e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
456e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
457e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
458e60f8db5SAlex Xie  */
459e60f8db5SAlex Xie 
460e60f8db5SAlex Xie /**
4612a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
462e60f8db5SAlex Xie  *
463e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
464e60f8db5SAlex Xie  * @vmid: vm instance to flush
4652a79d868SYong Zhao  * @flush_type: the flush type
466e60f8db5SAlex Xie  *
4672a79d868SYong Zhao  * Flush the TLB for the requested page table using certain type.
468e60f8db5SAlex Xie  */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* NOTE(review): engine 17 appears to be reserved for these
	 * kernel-initiated flushes — confirm against the invalidation
	 * engine assignment elsewhere in the driver.
	 */
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		/* Route the write+wait through the KIQ and return early */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	/* Direct MMIO path: serialize invalidations across callers */
	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* Poll (up to usec_timeout microseconds) for this VMID's ack bit */
	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
517e60f8db5SAlex Xie 
5189096d6e5SChristian König static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
519c633c00bSChristian König 					    unsigned vmid, uint64_t pd_addr)
5209096d6e5SChristian König {
521250b4228SChristian König 	struct amdgpu_device *adev = ring->adev;
522250b4228SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
5232a79d868SYong Zhao 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
5249096d6e5SChristian König 	unsigned eng = ring->vm_inv_eng;
5259096d6e5SChristian König 
5269096d6e5SChristian König 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
5279096d6e5SChristian König 			      lower_32_bits(pd_addr));
5289096d6e5SChristian König 
5299096d6e5SChristian König 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
5309096d6e5SChristian König 			      upper_32_bits(pd_addr));
5319096d6e5SChristian König 
532f8bc9037SAlex Deucher 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
533f8bc9037SAlex Deucher 					    hub->vm_inv_eng0_ack + eng,
534f8bc9037SAlex Deucher 					    req, 1 << vmid);
535f732b6b3SChristian König 
5369096d6e5SChristian König 	return pd_addr;
5379096d6e5SChristian König }
5389096d6e5SChristian König 
539c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
540c633c00bSChristian König 					unsigned pasid)
541c633c00bSChristian König {
542c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
543c633c00bSChristian König 	uint32_t reg;
544c633c00bSChristian König 
545f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
546f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
547f2d66571SLe Ma 		return;
548f2d66571SLe Ma 
549a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
550c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
551c633c00bSChristian König 	else
552c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
553c633c00bSChristian König 
554c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
555c633c00bSChristian König }
556c633c00bSChristian König 
557e60f8db5SAlex Xie /*
558e60f8db5SAlex Xie  * PTE format on VEGA 10:
559e60f8db5SAlex Xie  * 63:59 reserved
560e60f8db5SAlex Xie  * 58:57 mtype
561e60f8db5SAlex Xie  * 56 F
562e60f8db5SAlex Xie  * 55 L
563e60f8db5SAlex Xie  * 54 P
564e60f8db5SAlex Xie  * 53 SW
565e60f8db5SAlex Xie  * 52 T
566e60f8db5SAlex Xie  * 50:48 reserved
567e60f8db5SAlex Xie  * 47:12 4k physical page base address
568e60f8db5SAlex Xie  * 11:7 fragment
569e60f8db5SAlex Xie  * 6 write
570e60f8db5SAlex Xie  * 5 read
571e60f8db5SAlex Xie  * 4 exe
572e60f8db5SAlex Xie  * 3 Z
573e60f8db5SAlex Xie  * 2 snooped
574e60f8db5SAlex Xie  * 1 system
575e60f8db5SAlex Xie  * 0 valid
576e60f8db5SAlex Xie  *
577e60f8db5SAlex Xie  * PDE format on VEGA 10:
578e60f8db5SAlex Xie  * 63:59 block fragment size
579e60f8db5SAlex Xie  * 58:55 reserved
580e60f8db5SAlex Xie  * 54 P
581e60f8db5SAlex Xie  * 53:48 reserved
582e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
583e60f8db5SAlex Xie  * 5:3 reserved
584e60f8db5SAlex Xie  * 2 C
585e60f8db5SAlex Xie  * 1 system
586e60f8db5SAlex Xie  * 0 valid
587e60f8db5SAlex Xie  */
588e60f8db5SAlex Xie 
589e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
590e60f8db5SAlex Xie 						uint32_t flags)
591e60f8db5SAlex Xie 
592e60f8db5SAlex Xie {
593e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
594e60f8db5SAlex Xie 
595e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
596e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
597e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
598e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
599e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
600e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
601e60f8db5SAlex Xie 
602e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
603e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
6047596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
605e60f8db5SAlex Xie 		break;
606e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
6077596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
608e60f8db5SAlex Xie 		break;
609e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
6107596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
611e60f8db5SAlex Xie 		break;
612093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
613093e48c0SOak Zeng 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
614093e48c0SOak Zeng 		break;
615e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
6167596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
617e60f8db5SAlex Xie 		break;
618e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
6197596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
620e60f8db5SAlex Xie 		break;
621e60f8db5SAlex Xie 	default:
6227596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
623e60f8db5SAlex Xie 		break;
624e60f8db5SAlex Xie 	}
625e60f8db5SAlex Xie 
626e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
627e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
628e60f8db5SAlex Xie 
629e60f8db5SAlex Xie 	return pte_flag;
630e60f8db5SAlex Xie }
631e60f8db5SAlex Xie 
/*
 * gmc_v9_0_get_vm_pde - adjust a PDE address/flags pair before it is
 * written into the page table.
 *
 * VRAM addresses are rebased from the driver's view to the MC address
 * space.  When two further translation levels are enabled, the block
 * fragment size is set on PDB1 entries and the translate-further bit
 * on PDB0 entries.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* Only rebase addresses that point at local VRAM, i.e. not a
	 * system-memory page and not a PDE used as PTE. */
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* the resulting MC address must be 64-byte aligned and fit in 48 bits */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* PDB0 either points further down (translate further) or
		 * is itself a huge-page PTE; the two bits are exclusive. */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
655f75e237cSChristian König 
/* GMC callback table shared by all GMC v9 based ASICs */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
663e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
668e60f8db5SAlex Xie 
6695b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
6705b6b35aaSHawking Zhang {
6715b6b35aaSHawking Zhang 	switch (adev->asic_type) {
6725b6b35aaSHawking Zhang 	case CHIP_VEGA20:
6733aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
6743aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
6753aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
6763aacf4eaSTao Zhou 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
6773aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
678045c0216STao Zhou 		adev->umc.funcs = &umc_v6_1_funcs;
6795b6b35aaSHawking Zhang 		break;
6805b6b35aaSHawking Zhang 	default:
6815b6b35aaSHawking Zhang 		break;
6825b6b35aaSHawking Zhang 	}
6835b6b35aaSHawking Zhang }
6845b6b35aaSHawking Zhang 
6853d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
6863d093da0STao Zhou {
6873d093da0STao Zhou 	switch (adev->asic_type) {
6883d093da0STao Zhou 	case CHIP_VEGA20:
6893d093da0STao Zhou 		adev->mmhub_funcs = &mmhub_v1_0_funcs;
6903d093da0STao Zhou 		break;
6913d093da0STao Zhou 	default:
6923d093da0STao Zhou 		break;
6933d093da0STao Zhou 	}
6943d093da0STao Zhou }
6953d093da0STao Zhou 
696e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
697e60f8db5SAlex Xie {
698e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
699e60f8db5SAlex Xie 
700132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
701e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
7025b6b35aaSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
7033d093da0STao Zhou 	gmc_v9_0_set_mmhub_funcs(adev);
704e60f8db5SAlex Xie 
705770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
706770d13b1SChristian König 	adev->gmc.shared_aperture_end =
707770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
708bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
709770d13b1SChristian König 	adev->gmc.private_aperture_end =
710770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
711a7ea6548SAlex Deucher 
712e60f8db5SAlex Xie 	return 0;
713e60f8db5SAlex Xie }
714e60f8db5SAlex Xie 
715cd2b5623SAlex Deucher static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
716cd2b5623SAlex Deucher {
717cd2b5623SAlex Deucher 
718cd2b5623SAlex Deucher 	/*
719cd2b5623SAlex Deucher 	 * TODO:
720cd2b5623SAlex Deucher 	 * Currently there is a bug where some memory client outside
721cd2b5623SAlex Deucher 	 * of the driver writes to first 8M of VRAM on S3 resume,
722cd2b5623SAlex Deucher 	 * this overrides GART which by default gets placed in first 8M and
723cd2b5623SAlex Deucher 	 * causes VM_FAULTS once GTT is accessed.
724cd2b5623SAlex Deucher 	 * Keep the stolen memory reservation until the while this is not solved.
725cd2b5623SAlex Deucher 	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
726cd2b5623SAlex Deucher 	 */
727cd2b5623SAlex Deucher 	switch (adev->asic_type) {
72895010ba7SAlex Deucher 	case CHIP_VEGA10:
7296abc0c8fSAlex Deucher 	case CHIP_RAVEN:
730bfa3a9bbSHawking Zhang 	case CHIP_ARCTURUS:
7318787ee01SHuang Rui 	case CHIP_RENOIR:
73202122753SFlora Cui 		return true;
7336abc0c8fSAlex Deucher 	case CHIP_VEGA12:
734cd2b5623SAlex Deucher 	case CHIP_VEGA20:
735cd2b5623SAlex Deucher 	default:
7366abc0c8fSAlex Deucher 		return false;
737cd2b5623SAlex Deucher 	}
738cd2b5623SAlex Deucher }
739cd2b5623SAlex Deucher 
/*
 * gmc_v9_0_allocate_vm_inv_eng - assign a VM invalidation engine to
 * every ring.
 *
 * Each VM hub has a bitmap of free invalidation engines; the lowest
 * free engine of the ring's hub is picked and marked used.
 * Returns 0 on success or -EINVAL if a hub runs out of engines.
 */
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	/* per-hub bitmaps of still-unassigned invalidation engines */
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		/* ffs() is 1-based; 0 means the hub's bitmap is exhausted */
		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		/* claim the engine so no other ring on this hub reuses it */
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
769c713a461SEvan Quan 
/*
 * gmc_v9_0_ecc_late_init - late RAS/ECC initialization.
 *
 * Registers the UMC RAS handling and, when the ASIC provides MMHUB
 * RAS callbacks (see gmc_v9_0_set_mmhub_funcs), runs the MMHUB RAS
 * late init as well.
 */
static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_ih_if umc_ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};

	r = amdgpu_gmc_ras_late_init(adev, &umc_ih_info);
	if (r)
		return r;

	/* mmhub_funcs is only set on ASICs with MMHUB RAS support */
	if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
		r = adev->mmhub_funcs->ras_late_init(adev);
		if (r)
			return r;
	}
	return 0;
}
789791c4769Sxinhui pan 
/*
 * gmc_v9_0_late_init - late IP init.
 *
 * Releases the stolen-memory reservation where it is safe to do so,
 * assigns VM invalidation engines, logs ECC/SRAM-ECC availability,
 * runs RAS late init and finally enables the VM fault interrupt.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* free the stolen VRAM reservation unless the ASIC needs it kept */
	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
833e60f8db5SAlex Xie 
/*
 * gmc_v9_0_vram_gtt_location - place the VRAM, GART and AGP apertures
 * in the GPU's MC address space.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	/* FB base comes from the mmhub; under SR-IOV (non-Arcturus) it
	 * stays 0. */
	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
856e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcode is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	/* fall back to channel count * channel size if ATOM had no answer */
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* APUs carve VRAM out of system memory; use the MC view directly */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
939e60f8db5SAlex Xie 
940e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
941e60f8db5SAlex Xie {
942e60f8db5SAlex Xie 	int r;
943e60f8db5SAlex Xie 
9441123b989SChristian König 	if (adev->gart.bo) {
945e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
946e60f8db5SAlex Xie 		return 0;
947e60f8db5SAlex Xie 	}
948e60f8db5SAlex Xie 	/* Initialize common gart structure */
949e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
950e60f8db5SAlex Xie 	if (r)
951e60f8db5SAlex Xie 		return r;
952e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
9537596ab68SHawking Zhang 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
954e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
955e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
956e60f8db5SAlex Xie }
957e60f8db5SAlex Xie 
/*
 * gmc_v9_0_get_vbios_fb_size - size of the VRAM region the vbios/pre-OS
 * console still uses, so the driver can reserve it.
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		/* otherwise size the reservation from the active scanout
		 * viewport (4 bytes per pixel) */
		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
1003ebdef28eSAlex Deucher 
/*
 * gmc_v9_0_sw_init - software init.
 *
 * Initializes the gfx/mm hubs, configures per-ASIC VM address space
 * parameters, registers fault/ECC interrupts, sets up DMA masks,
 * memory controller, GART and the VM manager.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		/* rev 0/1 supports full 4-level translation */
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus has a second VMC client for its extra mmhub */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1132e60f8db5SAlex Xie 
/*
 * gmc_v9_0_sw_fini - software teardown.
 *
 * Unwinds gmc_v9_0_sw_init: removes UMC and MMHUB RAS state, tears
 * down the VM manager, stolen memory, GART table and buffer manager.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* free the stolen reservation that was kept past late_init */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1177e60f8db5SAlex Xie 
/*
 * gmc_v9_0_init_golden_registers - program per-ASIC golden register
 * settings for the mmhub/athub.
 *
 * Note the deliberate fallthrough: bare-metal VEGA10 shares the VEGA20
 * sequences, while SR-IOV VEGA10 skips them entirely.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1206e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the HDP, enables GART on both hubs, sets the fault
 * behaviour and flushes the TLB of every VM hub.
 * Returns 0 for success, negative error code on failure.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* pin the GART table so its GPU address stays valid */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	/* Arcturus uses the mmhub v9.4 programming sequence */
	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	/* honour the module parameter for VM fault handling */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);

	/* start from a clean TLB on every hub */
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
1281e60f8db5SAlex Xie 
1282e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1283e60f8db5SAlex Xie {
1284e60f8db5SAlex Xie 	int r;
1285e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1286e60f8db5SAlex Xie 
1287e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1288e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1289e60f8db5SAlex Xie 
1290edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1291edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
12924d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1293edca2d05SAlex Deucher 
1294edca2d05SAlex Deucher 		/* disable VGA render */
12954d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1296edca2d05SAlex Deucher 	}
1297edca2d05SAlex Deucher 
1298e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1299e60f8db5SAlex Xie 
1300e60f8db5SAlex Xie 	return r;
1301e60f8db5SAlex Xie }
1302e60f8db5SAlex Xie 
1303e60f8db5SAlex Xie /**
1304e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1305e60f8db5SAlex Xie  *
1306e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1307e60f8db5SAlex Xie  *
1308e60f8db5SAlex Xie  * This disables all VM page table.
1309e60f8db5SAlex Xie  */
1310e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1311e60f8db5SAlex Xie {
1312e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
131351cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
131451cce480SLe Ma 		mmhub_v9_4_gart_disable(adev);
131551cce480SLe Ma 	else
1316e60f8db5SAlex Xie 		mmhub_v1_0_gart_disable(adev);
1317ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1318e60f8db5SAlex Xie }
1319e60f8db5SAlex Xie 
/* Hardware teardown: release GMC interrupt references and disable the GART. */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* Drop the ECC and VM-fault interrupt references before the GART goes away. */
	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1336e60f8db5SAlex Xie 
static int gmc_v9_0_suspend(void *handle)
{
	/* Suspend is simply a full GMC hardware teardown. */
	return gmc_v9_0_hw_fini(handle);
}
1343e60f8db5SAlex Xie 
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	/* Any VMIDs handed out before suspend are stale now; reset them all. */
	amdgpu_vmid_reset_all(adev);

	return 0;
}
1357e60f8db5SAlex Xie 
static bool gmc_v9_0_is_idle(void *handle)
{
	/* The memory controller never reports busy on GMC v9. */
	return true;
}
1363e60f8db5SAlex Xie 
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* Nothing to poll: the MC is always idle on GMC v9. */
	return 0;
}
1369e60f8db5SAlex Xie 
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX placeholder for emulation; no soft reset is performed. */
	return 0;
}
1375e60f8db5SAlex Xie 
1376e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1377e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1378e60f8db5SAlex Xie {
1379d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1380d5583d4fSHuang Rui 
138151cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1382cb15e804SLe Ma 		mmhub_v9_4_set_clockgating(adev, state);
1383cb15e804SLe Ma 	else
1384bee7b51aSLe Ma 		mmhub_v1_0_set_clockgating(adev, state);
1385bee7b51aSLe Ma 
1386bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1387bee7b51aSLe Ma 
1388bee7b51aSLe Ma 	return 0;
1389e60f8db5SAlex Xie }
1390e60f8db5SAlex Xie 
139113052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
139213052be5SHuang Rui {
139313052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
139413052be5SHuang Rui 
139551cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1396cb15e804SLe Ma 		mmhub_v9_4_get_clockgating(adev, flags);
1397cb15e804SLe Ma 	else
139813052be5SHuang Rui 		mmhub_v1_0_get_clockgating(adev, flags);
1399bee7b51aSLe Ma 
1400bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
140113052be5SHuang Rui }
140213052be5SHuang Rui 
1403e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1404e60f8db5SAlex Xie 					enum amd_powergating_state state)
1405e60f8db5SAlex Xie {
1406e60f8db5SAlex Xie 	return 0;
1407e60f8db5SAlex Xie }
1408e60f8db5SAlex Xie 
/* IP-block callback table wiring the GMC v9 hooks into the amdgpu IP framework. */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1426e60f8db5SAlex Xie 
1427e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1428e60f8db5SAlex Xie {
1429e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1430e60f8db5SAlex Xie 	.major = 9,
1431e60f8db5SAlex Xie 	.minor = 0,
1432e60f8db5SAlex Xie 	.rev = 0,
1433e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1434e60f8db5SAlex Xie };
1435