xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision e7da754b)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
535b6b35aaSHawking Zhang #include "umc_v6_1.h"
54e7da754bSMonk Liu #include "umc_v6_0.h"
55e60f8db5SAlex Xie 
5644a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5744a99b65SAndrey Grodzovsky 
58791c4769Sxinhui pan #include "amdgpu_ras.h"
59029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
60791c4769Sxinhui pan 
61ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
62ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
63ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
64ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
65ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
66ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
67ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
68ebdef28eSAlex Deucher 
69e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
70e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
71e60f8db5SAlex Xie 
/*
 * Vega10 HDP golden register settings, stored as flat triples of
 * { register offset, AND mask, OR value }.  Offsets 0xf64-0xf6e are raw
 * HDP register indices; presumably applied via the common soc15
 * golden-register programming helpers — confirm at the call site.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
85e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden settings: per-register { mask, value } fixups. */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
915c583018SEvan Quan 
/* ATHUB 1.0.0 golden settings: RPB arbitration control fixups. */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
975c583018SEvan Quan 
/*
 * Raw addresses of the UMC MCUMC control registers whose low bits gate
 * ECC error interrupts (see gmc_v9_0_ecc_interrupt_state).  The table
 * enumerates eight bases (stride 0x40000) each with four offsets
 * (stride 0x800); presumably one entry per UMC instance/channel —
 * confirm against the UMC 6.x register map.
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
13202bab923SDavid Panariti 
/*
 * Companion mask registers (base + 0x20 relative to the ctrl table
 * above); toggled together with ecc_umc_mcumc_ctrl_addrs when ECC
 * interrupts are enabled/disabled.
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16702bab923SDavid Panariti 
/*
 * MCUMC status register addresses (base + 0x2 relative to the ctrl
 * table); same eight-base/four-offset layout as the tables above.
 * NOTE(review): not referenced by any code visible in this chunk —
 * verify it is still used elsewhere in the file.
 */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
20202bab923SDavid Panariti 
203791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
204791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
205791c4769Sxinhui pan 		unsigned type,
206791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
207791c4769Sxinhui pan {
208791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
209791c4769Sxinhui pan 
210791c4769Sxinhui pan 	bits = 0x7f;
211791c4769Sxinhui pan 
212791c4769Sxinhui pan 	switch (state) {
213791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
214791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
215791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
216791c4769Sxinhui pan 			tmp = RREG32(reg);
217791c4769Sxinhui pan 			tmp &= ~bits;
218791c4769Sxinhui pan 			WREG32(reg, tmp);
219791c4769Sxinhui pan 		}
220791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
221791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
222791c4769Sxinhui pan 			tmp = RREG32(reg);
223791c4769Sxinhui pan 			tmp &= ~bits;
224791c4769Sxinhui pan 			WREG32(reg, tmp);
225791c4769Sxinhui pan 		}
226791c4769Sxinhui pan 		break;
227791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
228791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
229791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
230791c4769Sxinhui pan 			tmp = RREG32(reg);
231791c4769Sxinhui pan 			tmp |= bits;
232791c4769Sxinhui pan 			WREG32(reg, tmp);
233791c4769Sxinhui pan 		}
234791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
235791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
236791c4769Sxinhui pan 			tmp = RREG32(reg);
237791c4769Sxinhui pan 			tmp |= bits;
238791c4769Sxinhui pan 			WREG32(reg, tmp);
239791c4769Sxinhui pan 		}
240791c4769Sxinhui pan 		break;
241791c4769Sxinhui pan 	default:
242791c4769Sxinhui pan 		break;
243791c4769Sxinhui pan 	}
244791c4769Sxinhui pan 
245791c4769Sxinhui pan 	return 0;
246791c4769Sxinhui pan }
247791c4769Sxinhui pan 
248791c4769Sxinhui pan static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
24981e02619STao Zhou 		struct ras_err_data *err_data,
250791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
251791c4769Sxinhui pan {
25287d2b92fSTao Zhou 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
25387d2b92fSTao Zhou 		return AMDGPU_RAS_SUCCESS;
25487d2b92fSTao Zhou 
2559b54d201SEric Huang 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
25687d2b92fSTao Zhou 	if (adev->umc.funcs &&
25787d2b92fSTao Zhou 	    adev->umc.funcs->query_ras_error_count)
25881e02619STao Zhou 	    adev->umc.funcs->query_ras_error_count(adev, err_data);
25987d2b92fSTao Zhou 
26087d2b92fSTao Zhou 	if (adev->umc.funcs &&
26187d2b92fSTao Zhou 	    adev->umc.funcs->query_ras_error_address &&
26287d2b92fSTao Zhou 	    adev->umc.max_ras_err_cnt_per_query) {
26387d2b92fSTao Zhou 		err_data->err_addr =
26487d2b92fSTao Zhou 			kcalloc(adev->umc.max_ras_err_cnt_per_query,
26587d2b92fSTao Zhou 				sizeof(struct eeprom_table_record), GFP_KERNEL);
26687d2b92fSTao Zhou 		/* still call query_ras_error_address to clear error status
26787d2b92fSTao Zhou 		 * even NOMEM error is encountered
26887d2b92fSTao Zhou 		 */
26987d2b92fSTao Zhou 		if(!err_data->err_addr)
27087d2b92fSTao Zhou 			DRM_WARN("Failed to alloc memory for umc error address record!\n");
27187d2b92fSTao Zhou 
27213b7c46cSTao Zhou 		/* umc query_ras_error_address is also responsible for clearing
27313b7c46cSTao Zhou 		 * error status
27413b7c46cSTao Zhou 		 */
27513b7c46cSTao Zhou 		adev->umc.funcs->query_ras_error_address(adev, err_data);
27687d2b92fSTao Zhou 	}
27791ba68f8STao Zhou 
27891ba68f8STao Zhou 	/* only uncorrectable error needs gpu reset */
27987d2b92fSTao Zhou 	if (err_data->ue_count) {
28087d2b92fSTao Zhou 		if (err_data->err_addr_cnt &&
28187d2b92fSTao Zhou 		    amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
28287d2b92fSTao Zhou 						err_data->err_addr_cnt))
28387d2b92fSTao Zhou 			DRM_WARN("Failed to add ras bad page!\n");
28487d2b92fSTao Zhou 
285791c4769Sxinhui pan 		amdgpu_ras_reset_gpu(adev, 0);
2867c6e68c7SAndrey Grodzovsky 	}
28791ba68f8STao Zhou 
28887d2b92fSTao Zhou 	kfree(err_data->err_addr);
289bd2280daSTao Zhou 	return AMDGPU_RAS_SUCCESS;
290791c4769Sxinhui pan }
291791c4769Sxinhui pan 
292791c4769Sxinhui pan static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
293791c4769Sxinhui pan 		struct amdgpu_irq_src *source,
294791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
295791c4769Sxinhui pan {
296145b03ebSTao Zhou 	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
297791c4769Sxinhui pan 	struct ras_dispatch_if ih_data = {
298791c4769Sxinhui pan 		.entry = entry,
299791c4769Sxinhui pan 	};
30014cfde84Sxinhui pan 
30114cfde84Sxinhui pan 	if (!ras_if)
30214cfde84Sxinhui pan 		return 0;
30314cfde84Sxinhui pan 
30414cfde84Sxinhui pan 	ih_data.head = *ras_if;
30514cfde84Sxinhui pan 
306791c4769Sxinhui pan 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
307791c4769Sxinhui pan 	return 0;
308791c4769Sxinhui pan }
309791c4769Sxinhui pan 
310e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
311e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
312e60f8db5SAlex Xie 					unsigned type,
313e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
314e60f8db5SAlex Xie {
315e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
316ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
317e60f8db5SAlex Xie 
31811250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
31911250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
32011250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
32111250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
32211250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
32311250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
32411250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
32511250164SChristian König 
326e60f8db5SAlex Xie 	switch (state) {
327e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
3281daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
329ae6d1416STom St Denis 			hub = &adev->vmhub[j];
330e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
331e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
332e60f8db5SAlex Xie 				tmp = RREG32(reg);
333e60f8db5SAlex Xie 				tmp &= ~bits;
334e60f8db5SAlex Xie 				WREG32(reg, tmp);
335e60f8db5SAlex Xie 			}
336e60f8db5SAlex Xie 		}
337e60f8db5SAlex Xie 		break;
338e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
3391daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
340ae6d1416STom St Denis 			hub = &adev->vmhub[j];
341e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
342e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
343e60f8db5SAlex Xie 				tmp = RREG32(reg);
344e60f8db5SAlex Xie 				tmp |= bits;
345e60f8db5SAlex Xie 				WREG32(reg, tmp);
346e60f8db5SAlex Xie 			}
347e60f8db5SAlex Xie 		}
348e60f8db5SAlex Xie 	default:
349e60f8db5SAlex Xie 		break;
350e60f8db5SAlex Xie 	}
351e60f8db5SAlex Xie 
352e60f8db5SAlex Xie 	return 0;
353e60f8db5SAlex Xie }
354e60f8db5SAlex Xie 
/*
 * gmc_v9_0_process_interrupt - handle a VM protection-fault interrupt
 *
 * Decodes the faulting address from the IV entry, filters duplicate
 * retry faults, optionally services the fault directly via
 * amdgpu_vm_handle_fault(), and otherwise logs a rate-limited diagnostic
 * including the decoded VM_L2_PROTECTION_FAULT_STATUS fields.
 *
 * Returns 1 when the fault was filtered or handled here (which also
 * prevents forwarding it to KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* bit 7 of src_data[1] marks this as a retry (recoverable) fault */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	/* Fault address: low 32 page bits in src_data[0], bits 44..47 in
	 * the low nibble of src_data[1]; pages are 4K (<< 12).
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Drop retry faults we have already seen for this address/pasid. */
	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* Map the IH client id to the VM hub that raised the fault. */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		/* Read then clear (write 1 to bit 0) the fault status. */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* Rate-limited diagnostic dump for the faulting process. */
	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, RW));

		}
	}

	return 0;
}
441e60f8db5SAlex Xie 
/* VM protection-fault interrupt source: state control + handler. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
446e60f8db5SAlex Xie 
447791c4769Sxinhui pan 
/* UMC ECC interrupt source: state control + handler. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};
452791c4769Sxinhui pan 
453e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
454e60f8db5SAlex Xie {
455770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
456770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
457791c4769Sxinhui pan 
458791c4769Sxinhui pan 	adev->gmc.ecc_irq.num_types = 1;
459791c4769Sxinhui pan 	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
460e60f8db5SAlex Xie }
461e60f8db5SAlex Xie 
4622a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4632a79d868SYong Zhao 					uint32_t flush_type)
46403f89febSChristian König {
46503f89febSChristian König 	u32 req = 0;
46603f89febSChristian König 
46703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
468c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4692a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
47003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
47103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
47203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
47303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
47403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
47503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
47603f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
47703f89febSChristian König 
47803f89febSChristian König 	return req;
47903f89febSChristian König }
48003f89febSChristian König 
481e60f8db5SAlex Xie /*
482e60f8db5SAlex Xie  * GART
483e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
484e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
485e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
486e60f8db5SAlex Xie  */
487e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub (AMDGPU_GFXHUB_0 / AMDGPU_MMHUB_*) to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	/* invalidation engine 17 is reserved for this direct-register path */
	const unsigned eng = 17;
	u32 j, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= adev->num_vmhubs);

	hub = &adev->vmhub[vmhub];
	tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
			(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
			!adev->in_gpu_reset) {
		uint32_t req = hub->vm_inv_eng0_req + eng;
		uint32_t ack = hub->vm_inv_eng0_ack + eng;

		/* issue the request/ack pair through the KIQ ring instead
		 * of direct MMIO and let it do the waiting
		 */
		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
				1 << vmid);
		return;
	}

	/* Direct MMIO path: serialize invalidations on this engine. */
	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* Poll (up to usec_timeout microseconds) for our VMID's ack bit. */
	for (j = 0; j < adev->usec_timeout; j++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	spin_unlock(&adev->gmc.invalidate_lock);
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
545e60f8db5SAlex Xie 
/*
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * Emits register writes on @ring to program the page-directory base for
 * @vmid on the ring's VM hub, then a write-and-wait on the ring's own
 * invalidation engine to flush and confirm the flush (flush type 0).
 *
 * Returns @pd_addr unchanged (required by the emit_flush_gpu_tlb hook).
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	/* each ring owns a dedicated invalidation engine */
	unsigned eng = ring->vm_inv_eng;

	/* program the new page-table base for this VMID (lo then hi) */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* write the invalidate request and wait for this VMID's ack bit */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
5669096d6e5SChristian König 
567c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
568c633c00bSChristian König 					unsigned pasid)
569c633c00bSChristian König {
570c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
571c633c00bSChristian König 	uint32_t reg;
572c633c00bSChristian König 
573f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
574f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
575f2d66571SLe Ma 		return;
576f2d66571SLe Ma 
577a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
578c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
579c633c00bSChristian König 	else
580c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
581c633c00bSChristian König 
582c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
583c633c00bSChristian König }
584c633c00bSChristian König 
585e60f8db5SAlex Xie /*
586e60f8db5SAlex Xie  * PTE format on VEGA 10:
587e60f8db5SAlex Xie  * 63:59 reserved
588e60f8db5SAlex Xie  * 58:57 mtype
589e60f8db5SAlex Xie  * 56 F
590e60f8db5SAlex Xie  * 55 L
591e60f8db5SAlex Xie  * 54 P
592e60f8db5SAlex Xie  * 53 SW
593e60f8db5SAlex Xie  * 52 T
594e60f8db5SAlex Xie  * 50:48 reserved
595e60f8db5SAlex Xie  * 47:12 4k physical page base address
596e60f8db5SAlex Xie  * 11:7 fragment
597e60f8db5SAlex Xie  * 6 write
598e60f8db5SAlex Xie  * 5 read
599e60f8db5SAlex Xie  * 4 exe
600e60f8db5SAlex Xie  * 3 Z
601e60f8db5SAlex Xie  * 2 snooped
602e60f8db5SAlex Xie  * 1 system
603e60f8db5SAlex Xie  * 0 valid
604e60f8db5SAlex Xie  *
605e60f8db5SAlex Xie  * PDE format on VEGA 10:
606e60f8db5SAlex Xie  * 63:59 block fragment size
607e60f8db5SAlex Xie  * 58:55 reserved
608e60f8db5SAlex Xie  * 54 P
609e60f8db5SAlex Xie  * 53:48 reserved
610e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
611e60f8db5SAlex Xie  * 5:3 reserved
612e60f8db5SAlex Xie  * 2 C
613e60f8db5SAlex Xie  * 1 system
614e60f8db5SAlex Xie  * 0 valid
615e60f8db5SAlex Xie  */
616e60f8db5SAlex Xie 
61771776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
618e60f8db5SAlex Xie 
619e60f8db5SAlex Xie {
62071776b6dSChristian König 	switch (flags) {
621e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
62271776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
623e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
62471776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
625e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
62671776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
627093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
62871776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
629e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
63071776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
631e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
63271776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
633e60f8db5SAlex Xie 	default:
63471776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
635e60f8db5SAlex Xie 	}
636e60f8db5SAlex Xie }
637e60f8db5SAlex Xie 
/*
 * Patch up a PDE before it is written into a page directory: rebase the
 * address into the GPU's internal (MC) address space and apply the
 * level-dependent flag fixups for the 4+1 level / translate-further layout.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* VRAM addresses need rebasing onto the MC aperture; system pages and
	 * PDEs that are actually huge-page PTEs already hold final addresses. */
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* Bits 63:48 and 5:0 are not address bits in a PDE (see the PDE
	 * format comment above); any set bit there means corruption. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* A PDB0 entry is either a huge-page PTE (drop the PDE_PTE
		 * marker) or a directory that must translate further. */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
661f75e237cSChristian König 
/*
 * Compute the final PTE flags for @mapping: the executable and mtype bits
 * come from the mapping itself, overriding whatever the caller passed in.
 */
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	/* Take the executable bit from the mapping, not the incoming flags. */
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	/* Likewise for the memory-type field. */
	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	/* PRT mappings must not translate: keep the PRT bit, drop VALID. */
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	/* NOTE(review): XGMI-remote VRAM on Arcturus gets the snooped bit,
	 * presumably for coherency across the XGMI link -- confirm. */
	if (adev->asic_type == CHIP_ARCTURUS &&
	    !(*flags & AMDGPU_PTE_SYSTEM) &&
	    mapping->bo_va->is_xgmi)
		*flags |= AMDGPU_PTE_SNOOPED;
}
682cbfae36cSChristian König 
/* GMC callback table shared by all GMC v9 ASICs. */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.map_mtype = gmc_v9_0_map_mtype,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte
};
691e60f8db5SAlex Xie 
/* Install the common GMC v9 callback table on @adev. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
696e60f8db5SAlex Xie 
/*
 * Select the UMC (unified memory controller) callbacks for the current ASIC.
 * ASICs not listed here leave adev->umc untouched.
 */
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case CHIP_VEGA20:
		/* Describe the UMC instance/channel topology so RAS code can
		 * address every channel when querying error counts. */
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}
7155b6b35aaSHawking Zhang 
7163d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
7173d093da0STao Zhou {
7183d093da0STao Zhou 	switch (adev->asic_type) {
7193d093da0STao Zhou 	case CHIP_VEGA20:
7203d093da0STao Zhou 		adev->mmhub_funcs = &mmhub_v1_0_funcs;
7213d093da0STao Zhou 		break;
7223d093da0STao Zhou 	default:
7233d093da0STao Zhou 		break;
7243d093da0STao Zhou 	}
7253d093da0STao Zhou }
7263d093da0STao Zhou 
/*
 * Early init: install the GMC/IRQ/UMC/MMHUB callback tables and define the
 * fixed shared and private apertures (4 GiB each) in the GPUVM address space.
 */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
745e60f8db5SAlex Xie 
746cd2b5623SAlex Deucher static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
747cd2b5623SAlex Deucher {
748cd2b5623SAlex Deucher 
749cd2b5623SAlex Deucher 	/*
750cd2b5623SAlex Deucher 	 * TODO:
751cd2b5623SAlex Deucher 	 * Currently there is a bug where some memory client outside
752cd2b5623SAlex Deucher 	 * of the driver writes to first 8M of VRAM on S3 resume,
753cd2b5623SAlex Deucher 	 * this overrides GART which by default gets placed in first 8M and
754cd2b5623SAlex Deucher 	 * causes VM_FAULTS once GTT is accessed.
755cd2b5623SAlex Deucher 	 * Keep the stolen memory reservation until the while this is not solved.
756cd2b5623SAlex Deucher 	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
757cd2b5623SAlex Deucher 	 */
758cd2b5623SAlex Deucher 	switch (adev->asic_type) {
75995010ba7SAlex Deucher 	case CHIP_VEGA10:
7606abc0c8fSAlex Deucher 	case CHIP_RAVEN:
761bfa3a9bbSHawking Zhang 	case CHIP_ARCTURUS:
7628787ee01SHuang Rui 	case CHIP_RENOIR:
76302122753SFlora Cui 		return true;
7646abc0c8fSAlex Deucher 	case CHIP_VEGA12:
765cd2b5623SAlex Deucher 	case CHIP_VEGA20:
766cd2b5623SAlex Deucher 	default:
7676abc0c8fSAlex Deucher 		return false;
768cd2b5623SAlex Deucher 	}
769cd2b5623SAlex Deucher }
770cd2b5623SAlex Deucher 
771c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
772c713a461SEvan Quan {
773c713a461SEvan Quan 	struct amdgpu_ring *ring;
774c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
775c8a6e2a3SLe Ma 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
776c8a6e2a3SLe Ma 		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
777c713a461SEvan Quan 	unsigned i;
778c713a461SEvan Quan 	unsigned vmhub, inv_eng;
779c713a461SEvan Quan 
780c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
781c713a461SEvan Quan 		ring = adev->rings[i];
782c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
783c713a461SEvan Quan 
784c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
785c713a461SEvan Quan 		if (!inv_eng) {
786c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
787c713a461SEvan Quan 				ring->name);
788c713a461SEvan Quan 			return -EINVAL;
789c713a461SEvan Quan 		}
790c713a461SEvan Quan 
791c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
79272464382SChristian König 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
793c713a461SEvan Quan 
794c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
795c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
796c713a461SEvan Quan 	}
797c713a461SEvan Quan 
798c713a461SEvan Quan 	return 0;
799c713a461SEvan Quan }
800c713a461SEvan Quan 
/*
 * Late RAS init for the memory blocks: UMC, MMHUB and XGMI.  The UMC and
 * MMHUB steps are optional -- a missing callback means the block has no RAS
 * support on this ASIC.
 */
static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* UMC RAS errors are delivered through this interrupt callback. */
	struct ras_ih_if umc_ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};

	if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
		r = adev->umc.funcs->ras_late_init(adev, &umc_ih_info);
		if (r)
			return r;
	}

	if (adev->mmhub_funcs && adev->mmhub_funcs->ras_late_init) {
		r = adev->mmhub_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return amdgpu_xgmi_ras_late_init(adev);
}
823791c4769Sxinhui pan 
/*
 * Late init: release stolen memory where safe, assign TLB invalidation
 * engines, report ECC capability, bring up RAS and enable the VM fault IRQ.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			/* The probe results are informational only; r is
			 * reused below and never treated as an error here. */
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
867e60f8db5SAlex Xie 
/*
 * Decide where VRAM, GART and the AGP aperture live inside the GPU's
 * internal (MC) address space.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	/* NOTE(review): under non-Arcturus SR-IOV the base stays 0 --
	 * presumably the host programs the FB location; confirm. */
	if (adev->asic_type == CHIP_ARCTURUS)
		base = mmhub_v9_4_get_fb_location(adev);
	else if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
890e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* Try to grow the FB BAR before reading it; APUs have no real BAR. */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* NOTE(review): on x86-64 APUs the aperture is re-pointed at the MC
	 * FB offset and sized to all of VRAM -- presumably the CPU can access
	 * the carve-out directly there; confirm. */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* module parameter override, in MB */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
951e60f8db5SAlex Xie 
/*
 * Create the common GART structure and allocate the GART page table in VRAM.
 * GART PTEs are forced to uncached + executable.
 */
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	/* one 8-byte PTE per GPU page */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
969e60f8db5SAlex Xie 
/*
 * Work out how much VRAM the pre-OS console (vBIOS framebuffer / VGA
 * emulator) occupies so the caller can reserve it as stolen memory.
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		/* framebuffer size = active viewport area * 4 bytes/pixel,
		 * read from the display-family-specific viewport register */
		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
1015ebdef28eSAlex Deucher 
/*
 * sw_init: discover VRAM parameters, size the GPUVM address space per ASIC,
 * hook up the fault/ECC interrupts and create the memory manager and GART.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev, &vram_width, &vram_type);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
		 * RAVEN, and the DF registers are not readable either, so
		 * hardcoding seems to be the only way to set it correctly.
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	/* Fall back to computing the width from the HBM channel count. */
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus has a second VMC client delivering the same fault source. */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* 44-bit DMA addressing */
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1167e60f8db5SAlex Xie 
/*
 * sw_fini: tear down what sw_init created -- RAS interfaces first, then the
 * VM manager, the stolen-memory reservation, the GART table and the BO
 * manager.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *stolen_vga_buf;

	/* UMC RAS teardown: fs nodes, interrupt handler, then the feature. */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	/* MMHUB RAS teardown; unlike UMC there is no IH to remove. */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* Free the reservation kept alive by gmc_v9_0_keep_stolen_memory(). */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1212e60f8db5SAlex Xie 
/*
 * Apply golden (fixed-up default) register settings for MMHUB/ATHUB.
 * NOTE(review): skipped for Vega10 under SR-IOV -- presumably the host
 * programs these registers; confirm.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1241e60f8db5SAlex Xie 
1242e60f8db5SAlex Xie /**
1243e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1244e60f8db5SAlex Xie  *
1245e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1246e60f8db5SAlex Xie  */
1247e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1248e60f8db5SAlex Xie {
12493ff98548SOak Zeng 	int r, i;
1250e60f8db5SAlex Xie 	bool value;
1251e60f8db5SAlex Xie 	u32 tmp;
1252e60f8db5SAlex Xie 
12539c3f2b54SAlex Deucher 	amdgpu_device_program_register_sequence(adev,
1254e60f8db5SAlex Xie 						golden_settings_vega10_hdp,
1255c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_vega10_hdp));
1256e60f8db5SAlex Xie 
12571123b989SChristian König 	if (adev->gart.bo == NULL) {
1258e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1259e60f8db5SAlex Xie 		return -EINVAL;
1260e60f8db5SAlex Xie 	}
1261ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1262ce1b1b66SMonk Liu 	if (r)
1263ce1b1b66SMonk Liu 		return r;
1264e60f8db5SAlex Xie 
12652fcd43ceSHawking Zhang 	switch (adev->asic_type) {
12662fcd43ceSHawking Zhang 	case CHIP_RAVEN:
12678787ee01SHuang Rui 		/* TODO for renoir */
1268f8386b35SHawking Zhang 		mmhub_v1_0_update_power_gating(adev, true);
12692fcd43ceSHawking Zhang 		break;
12702fcd43ceSHawking Zhang 	default:
12712fcd43ceSHawking Zhang 		break;
12722fcd43ceSHawking Zhang 	}
12732fcd43ceSHawking Zhang 
1274e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
1275e60f8db5SAlex Xie 	if (r)
1276e60f8db5SAlex Xie 		return r;
1277e60f8db5SAlex Xie 
127851cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
127951cce480SLe Ma 		r = mmhub_v9_4_gart_enable(adev);
128051cce480SLe Ma 	else
1281e60f8db5SAlex Xie 		r = mmhub_v1_0_gart_enable(adev);
1282e60f8db5SAlex Xie 	if (r)
1283e60f8db5SAlex Xie 		return r;
1284e60f8db5SAlex Xie 
1285846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1286e60f8db5SAlex Xie 
1287b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1288b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1289e60f8db5SAlex Xie 
1290fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
1291fe2b5323STiecheng Zhou 	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
1292fe2b5323STiecheng Zhou 
12931d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP.*/
1294bebc0762SHawking Zhang 	adev->nbio.funcs->hdp_flush(adev, NULL);
12951d4e0a8cSMonk Liu 
1296e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1297e60f8db5SAlex Xie 		value = false;
1298e60f8db5SAlex Xie 	else
1299e60f8db5SAlex Xie 		value = true;
1300e60f8db5SAlex Xie 
1301e60f8db5SAlex Xie 	gfxhub_v1_0_set_fault_enable_default(adev, value);
130251cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
130351cce480SLe Ma 		mmhub_v9_4_set_fault_enable_default(adev, value);
130451cce480SLe Ma 	else
1305e60f8db5SAlex Xie 		mmhub_v1_0_set_fault_enable_default(adev, value);
13063ff98548SOak Zeng 
13073ff98548SOak Zeng 	for (i = 0; i < adev->num_vmhubs; ++i)
13083ff98548SOak Zeng 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1309e60f8db5SAlex Xie 
1310e7da754bSMonk Liu 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
1311e7da754bSMonk Liu 		adev->umc.funcs->init_registers(adev);
1312e7da754bSMonk Liu 
1313e60f8db5SAlex Xie 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1314770d13b1SChristian König 		 (unsigned)(adev->gmc.gart_size >> 20),
13154e830fb1SChristian König 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1316e60f8db5SAlex Xie 	adev->gart.ready = true;
1317e60f8db5SAlex Xie 	return 0;
1318e60f8db5SAlex Xie }
1319e60f8db5SAlex Xie 
1320e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1321e60f8db5SAlex Xie {
1322e60f8db5SAlex Xie 	int r;
1323e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1324e60f8db5SAlex Xie 
1325e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1326e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1327e60f8db5SAlex Xie 
1328edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1329edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
13304d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1331edca2d05SAlex Deucher 
1332edca2d05SAlex Deucher 		/* disable VGA render */
13334d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1334edca2d05SAlex Deucher 	}
1335edca2d05SAlex Deucher 
1336e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1337e60f8db5SAlex Xie 
1338e60f8db5SAlex Xie 	return r;
1339e60f8db5SAlex Xie }
1340e60f8db5SAlex Xie 
1341e60f8db5SAlex Xie /**
1342e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1343e60f8db5SAlex Xie  *
1344e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1345e60f8db5SAlex Xie  *
1346e60f8db5SAlex Xie  * This disables all VM page table.
1347e60f8db5SAlex Xie  */
1348e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1349e60f8db5SAlex Xie {
1350e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
135151cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
135251cce480SLe Ma 		mmhub_v9_4_gart_disable(adev);
135351cce480SLe Ma 	else
1354e60f8db5SAlex Xie 		mmhub_v1_0_gart_disable(adev);
1355ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1356e60f8db5SAlex Xie }
1357e60f8db5SAlex Xie 
1358e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1359e60f8db5SAlex Xie {
1360e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1361e60f8db5SAlex Xie 
13625dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
13635dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
13645dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
13655dd696aeSTrigger Huang 		return 0;
13665dd696aeSTrigger Huang 	}
13675dd696aeSTrigger Huang 
1368791c4769Sxinhui pan 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1369770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1370e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1371e60f8db5SAlex Xie 
1372e60f8db5SAlex Xie 	return 0;
1373e60f8db5SAlex Xie }
1374e60f8db5SAlex Xie 
static int gmc_v9_0_suspend(void *handle)
{
	/* Suspend is identical to hw_fini: quiesce IRQs and the GART. */
	return gmc_v9_0_hw_fini(handle);
}
1381e60f8db5SAlex Xie 
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	/* Hardware state was lost across suspend; invalidate all VMIDs. */
	amdgpu_vmid_reset_all(adev);

	return 0;
}
1395e60f8db5SAlex Xie 
static bool gmc_v9_0_is_idle(void *handle)
{
	/* The memory controller never reports busy on GMC v9. */
	return true;
}
1401e60f8db5SAlex Xie 
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* Nothing to wait on — GMC v9 is always idle. */
	return 0;
}
1407e60f8db5SAlex Xie 
static int gmc_v9_0_soft_reset(void *handle)
{
	/* Not implemented; placeholder kept for emulation. */
	return 0;
}
1413e60f8db5SAlex Xie 
1414e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1415e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1416e60f8db5SAlex Xie {
1417d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1418d5583d4fSHuang Rui 
141951cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1420cb15e804SLe Ma 		mmhub_v9_4_set_clockgating(adev, state);
1421cb15e804SLe Ma 	else
1422bee7b51aSLe Ma 		mmhub_v1_0_set_clockgating(adev, state);
1423bee7b51aSLe Ma 
1424bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1425bee7b51aSLe Ma 
1426bee7b51aSLe Ma 	return 0;
1427e60f8db5SAlex Xie }
1428e60f8db5SAlex Xie 
142913052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
143013052be5SHuang Rui {
143113052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
143213052be5SHuang Rui 
143351cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
1434cb15e804SLe Ma 		mmhub_v9_4_get_clockgating(adev, flags);
1435cb15e804SLe Ma 	else
143613052be5SHuang Rui 		mmhub_v1_0_get_clockgating(adev, flags);
1437bee7b51aSLe Ma 
1438bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
143913052be5SHuang Rui }
144013052be5SHuang Rui 
1441e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1442e60f8db5SAlex Xie 					enum amd_powergating_state state)
1443e60f8db5SAlex Xie {
1444e60f8db5SAlex Xie 	return 0;
1445e60f8db5SAlex Xie }
1446e60f8db5SAlex Xie 
/* IP-block callback table for GMC v9; referenced by gmc_v9_0_ip_block below. */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1464e60f8db5SAlex Xie 
/* GMC 9.0 IP block version descriptor, registered during ASIC bring-up. */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1473