xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 51cce480)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
416ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
42250b4228SChristian König #include "oss/osssys_4_0_offset.h"
43e60f8db5SAlex Xie 
44946a4d5bSShaoyun Liu #include "soc15.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5151cce480SLe Ma #include "mmhub_v9_4.h"
52e60f8db5SAlex Xie 
5344a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5444a99b65SAndrey Grodzovsky 
55791c4769Sxinhui pan #include "amdgpu_ras.h"
56791c4769Sxinhui pan 
57ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
58ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
59ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
60ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
61ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
62ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
63ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
64ebdef28eSAlex Deucher 
65e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
66e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
67e60f8db5SAlex Xie 
/* HDP golden register settings, consumed as { offset, AND mask, OR value }
 * triplets (0xf64..0xf6e appear to be HDP MEM_POWER/clock registers whose
 * masked bits are cleared) — NOTE(review): triplet layout inferred from the
 * common amdgpu golden-setting convention; confirm against the consumer.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
81e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden settings: DAGB write-client and EA DRAM write mapping */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
875c583018SEvan Quan 
/* ATHUB 1.0.0 golden settings: RPB arbiter control tuning */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
935c583018SEvan Quan 
/* MCA UMC error-control register addresses, one entry per UMC channel.
 * NOTE(review): the layout looks like 8 UMC instances (base stride 0x40000)
 * x 4 channels (stride 0x800) — confirm against the UMC 6.0 register spec.
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
12802bab923SDavid Panariti 
/* MCA UMC error-control MASK register addresses; same per-channel layout as
 * ecc_umc_mcumc_ctrl_addrs, at offset +0x20 from each control register. */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
16302bab923SDavid Panariti 
/* MCA UMC error-STATUS register addresses; same per-channel layout as
 * ecc_umc_mcumc_ctrl_addrs, at offset +0x2 from each control register. */
static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};
19802bab923SDavid Panariti 
199791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
200791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
201791c4769Sxinhui pan 		unsigned type,
202791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
203791c4769Sxinhui pan {
204791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
205791c4769Sxinhui pan 
206791c4769Sxinhui pan 	bits = 0x7f;
207791c4769Sxinhui pan 
208791c4769Sxinhui pan 	switch (state) {
209791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
210791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
211791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
212791c4769Sxinhui pan 			tmp = RREG32(reg);
213791c4769Sxinhui pan 			tmp &= ~bits;
214791c4769Sxinhui pan 			WREG32(reg, tmp);
215791c4769Sxinhui pan 		}
216791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
217791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
218791c4769Sxinhui pan 			tmp = RREG32(reg);
219791c4769Sxinhui pan 			tmp &= ~bits;
220791c4769Sxinhui pan 			WREG32(reg, tmp);
221791c4769Sxinhui pan 		}
222791c4769Sxinhui pan 		break;
223791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
224791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
225791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
226791c4769Sxinhui pan 			tmp = RREG32(reg);
227791c4769Sxinhui pan 			tmp |= bits;
228791c4769Sxinhui pan 			WREG32(reg, tmp);
229791c4769Sxinhui pan 		}
230791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
231791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
232791c4769Sxinhui pan 			tmp = RREG32(reg);
233791c4769Sxinhui pan 			tmp |= bits;
234791c4769Sxinhui pan 			WREG32(reg, tmp);
235791c4769Sxinhui pan 		}
236791c4769Sxinhui pan 		break;
237791c4769Sxinhui pan 	default:
238791c4769Sxinhui pan 		break;
239791c4769Sxinhui pan 	}
240791c4769Sxinhui pan 
241791c4769Sxinhui pan 	return 0;
242791c4769Sxinhui pan }
243791c4769Sxinhui pan 
/* RAS error-data callback: flag the SRAM ECC error to KFD, request a GPU
 * reset, and report the event as an uncorrectable error (UE). */
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev, 0);
	return AMDGPU_RAS_UE;
}
251791c4769Sxinhui pan 
252791c4769Sxinhui pan static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
253791c4769Sxinhui pan 		struct amdgpu_irq_src *source,
254791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
255791c4769Sxinhui pan {
25614cfde84Sxinhui pan 	struct ras_common_if *ras_if = adev->gmc.ras_if;
257791c4769Sxinhui pan 	struct ras_dispatch_if ih_data = {
258791c4769Sxinhui pan 		.entry = entry,
259791c4769Sxinhui pan 	};
26014cfde84Sxinhui pan 
26114cfde84Sxinhui pan 	if (!ras_if)
26214cfde84Sxinhui pan 		return 0;
26314cfde84Sxinhui pan 
26414cfde84Sxinhui pan 	ih_data.head = *ras_if;
26514cfde84Sxinhui pan 
266791c4769Sxinhui pan 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
267791c4769Sxinhui pan 	return 0;
268791c4769Sxinhui pan }
269791c4769Sxinhui pan 
270e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
271e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
272e60f8db5SAlex Xie 					unsigned type,
273e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
274e60f8db5SAlex Xie {
275e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
276ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
277e60f8db5SAlex Xie 
27811250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27911250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
28011250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
28111250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
28211250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
28311250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
28411250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
28511250164SChristian König 
286e60f8db5SAlex Xie 	switch (state) {
287e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
2881daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
289ae6d1416STom St Denis 			hub = &adev->vmhub[j];
290e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
291e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
292e60f8db5SAlex Xie 				tmp = RREG32(reg);
293e60f8db5SAlex Xie 				tmp &= ~bits;
294e60f8db5SAlex Xie 				WREG32(reg, tmp);
295e60f8db5SAlex Xie 			}
296e60f8db5SAlex Xie 		}
297e60f8db5SAlex Xie 		break;
298e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
2991daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
300ae6d1416STom St Denis 			hub = &adev->vmhub[j];
301e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
302e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
303e60f8db5SAlex Xie 				tmp = RREG32(reg);
304e60f8db5SAlex Xie 				tmp |= bits;
305e60f8db5SAlex Xie 				WREG32(reg, tmp);
306e60f8db5SAlex Xie 			}
307e60f8db5SAlex Xie 		}
308e60f8db5SAlex Xie 	default:
309e60f8db5SAlex Xie 		break;
310e60f8db5SAlex Xie 	}
311e60f8db5SAlex Xie 
312e60f8db5SAlex Xie 	return 0;
313e60f8db5SAlex Xie }
314e60f8db5SAlex Xie 
/* VM fault interrupt handler: decode the faulting address, filter already
 * seen retry faults, read+clear the L2 protection fault status (bare metal
 * only) and emit a rate-limited diagnostic.  Returns 1 when the fault was
 * filtered (also keeps it away from KFD), 0 otherwise. */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	/* bit 7 of src_data[1] distinguishes retry from no-retry faults */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;

	/* reassemble the faulting page address from the IV entry payload:
	 * src_data[0] holds bits 47:12, low nibble of src_data[1] bits 47:44 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		/* NOTE(review): writing bit 0 presumably clears the latched
		 * fault status so the next fault can latch — confirm against
		 * the VM_L2_PROTECTION_FAULT_CNTL register spec. */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
361e60f8db5SAlex Xie 
/* VM protection-fault interrupt source callbacks */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
366e60f8db5SAlex Xie 
367791c4769Sxinhui pan 
/* UMC ECC interrupt source callbacks */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};
372791c4769Sxinhui pan 
/* Register the VM-fault and ECC interrupt sources with the GMC block. */
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
381e60f8db5SAlex Xie 
/* Build the VM_INVALIDATE_ENG*_REQ value for one VMID: invalidate all L1
 * PTE and L2 PTE/PDE0/PDE1/PDE2 entries with the requested flush type,
 * without clearing the protection-fault status address. */
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}
40003f89febSChristian König 
401e60f8db5SAlex Xie /*
402e60f8db5SAlex Xie  * GART
403e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
404e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
405e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
406e60f8db5SAlex Xie  */
407e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 * Iterates over every VM hub; uses the KIQ to perform the write/wait pair
 * when required, otherwise writes the invalidate request directly and polls
 * for the per-vmid ack bit.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	/* invalidation engine 17 is reserved for this kernel-driver path */
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < adev->num_vmhubs; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			/* KIQ performs the REQ write + ACK wait on our behalf */
			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		/* poll for the per-vmid ack bit, 1us per iteration */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}
456e60f8db5SAlex Xie 
/* Emit a TLB flush on a ring: program the page-directory base for @vmid on
 * the ring's hub, then emit a write+wait on the invalidate engine so the
 * flush completes before subsequent ring work.  Returns @pd_addr. */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/* ctx PTB base registers are 64-bit, hence the 2 * vmid stride */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
4779096d6e5SChristian König 
478c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
479c633c00bSChristian König 					unsigned pasid)
480c633c00bSChristian König {
481c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
482c633c00bSChristian König 	uint32_t reg;
483c633c00bSChristian König 
484a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
485c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
486c633c00bSChristian König 	else
487c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
488c633c00bSChristian König 
489c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
490c633c00bSChristian König }
491c633c00bSChristian König 
492e60f8db5SAlex Xie /*
493e60f8db5SAlex Xie  * PTE format on VEGA 10:
494e60f8db5SAlex Xie  * 63:59 reserved
495e60f8db5SAlex Xie  * 58:57 mtype
496e60f8db5SAlex Xie  * 56 F
497e60f8db5SAlex Xie  * 55 L
498e60f8db5SAlex Xie  * 54 P
499e60f8db5SAlex Xie  * 53 SW
500e60f8db5SAlex Xie  * 52 T
501e60f8db5SAlex Xie  * 50:48 reserved
502e60f8db5SAlex Xie  * 47:12 4k physical page base address
503e60f8db5SAlex Xie  * 11:7 fragment
504e60f8db5SAlex Xie  * 6 write
505e60f8db5SAlex Xie  * 5 read
506e60f8db5SAlex Xie  * 4 exe
507e60f8db5SAlex Xie  * 3 Z
508e60f8db5SAlex Xie  * 2 snooped
509e60f8db5SAlex Xie  * 1 system
510e60f8db5SAlex Xie  * 0 valid
511e60f8db5SAlex Xie  *
512e60f8db5SAlex Xie  * PDE format on VEGA 10:
513e60f8db5SAlex Xie  * 63:59 block fragment size
514e60f8db5SAlex Xie  * 58:55 reserved
515e60f8db5SAlex Xie  * 54 P
516e60f8db5SAlex Xie  * 53:48 reserved
517e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
518e60f8db5SAlex Xie  * 5:3 reserved
519e60f8db5SAlex Xie  * 2 C
520e60f8db5SAlex Xie  * 1 system
521e60f8db5SAlex Xie  * 0 valid
522e60f8db5SAlex Xie  */
523e60f8db5SAlex Xie 
524e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
525e60f8db5SAlex Xie 						uint32_t flags)
526e60f8db5SAlex Xie 
527e60f8db5SAlex Xie {
528e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
529e60f8db5SAlex Xie 
530e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
531e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
532e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
533e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
534e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
535e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
536e60f8db5SAlex Xie 
537e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
538e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
5397596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
540e60f8db5SAlex Xie 		break;
541e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
5427596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
543e60f8db5SAlex Xie 		break;
544e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
5457596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
546e60f8db5SAlex Xie 		break;
547e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
5487596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
549e60f8db5SAlex Xie 		break;
550e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
5517596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
552e60f8db5SAlex Xie 		break;
553e60f8db5SAlex Xie 	default:
5547596ab68SHawking Zhang 		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
555e60f8db5SAlex Xie 		break;
556e60f8db5SAlex Xie 	}
557e60f8db5SAlex Xie 
558e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
559e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
560e60f8db5SAlex Xie 
561e60f8db5SAlex Xie 	return pte_flag;
562e60f8db5SAlex Xie }
563e60f8db5SAlex Xie 
/* Adjust a PDE's address and flags in place for the given page-table
 * @level.  Real (non-PTE, non-system) PDEs are rebased from a VRAM CPU
 * offset to an MC address; with translate_further, PDB1 entries get the
 * block-fragment size and PDB0 entries pointing at a PTB get the TF bit. */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* address must be 64-byte aligned and within the 48-bit MC space */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
587f75e237cSChristian König 
/* GMC v9 callback table exported to the amdgpu core */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
595e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
600e60f8db5SAlex Xie 
/* IP-block early_init: install GMC and IRQ callbacks and set the fixed
 * 4GB shared and private apertures in the high VA space.  Returns 0. */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
617e60f8db5SAlex Xie 
/* Return true when the stolen-memory reservation must be kept for this
 * ASIC (VEGA10 and RAVEN), false otherwise. */
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first
	 * 8M, and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}
640cd2b5623SAlex Deucher 
/*
 * Assign each ring a dedicated VM invalidation engine on its VM hub.
 * Returns 0 on success, -EINVAL if a hub runs out of free engines.
 */
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	/* Per-hub bitmaps of invalidation engines still free for assignment.
	 * NOTE(review): the third entry (hub index 2, MMHUB_1) reuses the
	 * GFXHUB bitmap rather than an MMHUB one — verify against the
	 * *_FREE_VM_INV_ENGS_BITMAP defines that this is intended.
	 */
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		/* ffs() is 1-based; 0 means no free engine left on this hub */
		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		/* mark the engine as claimed on this hub */
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
670c713a461SEvan Quan 
/*
 * Late init of UMC RAS (ECC error handling): enable the RAS feature via
 * the TA, register the interrupt handler and the sysfs/debugfs nodes.
 *
 * Handles three situations:
 *  - RAS not supported for UMC: make sure the feature is disabled, done.
 *  - resume (*ras_if already allocated): only re-enable the feature and
 *    re-acquire the ECC interrupt.
 *  - first boot: allocate *ras_if and set up everything from scratch.
 *
 * Returns 0 on success or when a GPU reset was scheduled to retry,
 * negative error code otherwise.
 */
static int gmc_v9_0_ecc_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = &adev->gmc.ras_if;
	struct ras_ih_if ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "umc_err_count",
		.debugfs_name = "umc_err_inject",
	};
	struct ras_common_if ras_block = {
		.block = AMDGPU_RAS_BLOCK__UMC,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "umc",
	};
	int r;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
		/* make sure the feature is off if RAS isn't supported */
		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
		return 0;
	}
	/* handle resume path. */
	if (*ras_if) {
		/* resend ras TA enable cmd during resume.
		 * prepare to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset. will run again. */
				amdgpu_ras_request_reset_on_boot(adev,
						AMDGPU_RAS_BLOCK__UMC);
				return 0;
			}
			/* fail to enable ras, cleanup all. */
			goto irq;
		}
		/* enable successfully. continue. */
		goto resume;
	}

	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			/* TA not ready yet: schedule a reset and report success
			 * so this init runs again afterwards.
			 */
			amdgpu_ras_request_reset_on_boot(adev,
					AMDGPU_RAS_BLOCK__UMC);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info.head = **ras_if;

	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	if (r)
		goto interrupt;

	amdgpu_ras_debugfs_create(adev, &fs_info);

	r = amdgpu_ras_sysfs_create(adev, &fs_info);
	if (r)
		goto sysfs;
resume:
	r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
	if (r)
		goto irq;

	return 0;
/* Unwind in reverse order of setup.  NOTE(review): the label names are
 * shifted by one step relative to the call they undo (e.g. "irq:" starts
 * by removing sysfs); the fall-through chain itself is correct.
 */
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}
761791c4769Sxinhui pan 
762791c4769Sxinhui pan 
763e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
764e60f8db5SAlex Xie {
765e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
766f49ea9f8SHawking Zhang 	bool r;
7674789c463SChristian König 
768cd2b5623SAlex Deucher 	if (!gmc_v9_0_keep_stolen_memory(adev))
769cd2b5623SAlex Deucher 		amdgpu_bo_late_init(adev);
7706f752ec2SAndrey Grodzovsky 
771c713a461SEvan Quan 	r = gmc_v9_0_allocate_vm_inv_eng(adev);
772c713a461SEvan Quan 	if (r)
773c713a461SEvan Quan 		return r;
774f49ea9f8SHawking Zhang 	/* Check if ecc is available */
775f49ea9f8SHawking Zhang 	if (!amdgpu_sriov_vf(adev)) {
776f49ea9f8SHawking Zhang 		switch (adev->asic_type) {
777f49ea9f8SHawking Zhang 		case CHIP_VEGA10:
778f49ea9f8SHawking Zhang 		case CHIP_VEGA20:
779f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
780f49ea9f8SHawking Zhang 			if (!r) {
78102bab923SDavid Panariti 				DRM_INFO("ECC is not present.\n");
782f49ea9f8SHawking Zhang 				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
783e1d1a772SAlex Deucher 					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
78402bab923SDavid Panariti 			} else {
785f49ea9f8SHawking Zhang 				DRM_INFO("ECC is active.\n");
786f49ea9f8SHawking Zhang 			}
787f49ea9f8SHawking Zhang 
788f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
789f49ea9f8SHawking Zhang 			if (!r) {
790f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is not present.\n");
791f49ea9f8SHawking Zhang 			} else {
792f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is active.\n");
793f49ea9f8SHawking Zhang 			}
794f49ea9f8SHawking Zhang 			break;
795f49ea9f8SHawking Zhang 		default:
796f49ea9f8SHawking Zhang 			break;
79702bab923SDavid Panariti 		}
7985ba4fa35SAlex Deucher 	}
79902bab923SDavid Panariti 
800791c4769Sxinhui pan 	r = gmc_v9_0_ecc_late_init(handle);
801791c4769Sxinhui pan 	if (r)
802e60f8db5SAlex Xie 		return r;
803e60f8db5SAlex Xie 
804770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
805e60f8db5SAlex Xie }
806e60f8db5SAlex Xie 
/*
 * Place VRAM, GART and (bare-metal only) AGP apertures inside the GPU's
 * physical address space, accounting for the XGMI node offset.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;
	/* Under SR-IOV the FB location registers aren't ours to read;
	 * base stays 0 and the host controls placement.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		if (adev->asic_type == CHIP_ARCTURUS)
			base = mmhub_v9_4_get_fb_location(adev);
		else
			base = mmhub_v1_0_get_fb_location(adev);
	}
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
830e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcode is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	/* ATOM didn't provide a width (or emulation mode): derive it from
	 * the HBM channel count and per-channel size.
	 */
	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* dGPU only: try to grow the PCI BAR to cover all of VRAM */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* APUs share system memory: address VRAM directly through the
	 * MC FB offset instead of the (tiny) PCI aperture.
	 */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size; -1 means "pick a per-ASIC default" */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
912e60f8db5SAlex Xie 
913e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
914e60f8db5SAlex Xie {
915e60f8db5SAlex Xie 	int r;
916e60f8db5SAlex Xie 
9171123b989SChristian König 	if (adev->gart.bo) {
918e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
919e60f8db5SAlex Xie 		return 0;
920e60f8db5SAlex Xie 	}
921e60f8db5SAlex Xie 	/* Initialize common gart structure */
922e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
923e60f8db5SAlex Xie 	if (r)
924e60f8db5SAlex Xie 		return r;
925e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
9267596ab68SHawking Zhang 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
927e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
928e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
929e60f8db5SAlex Xie }
930e60f8db5SAlex Xie 
/*
 * Compute the size of the framebuffer the vbios/pre-OS console left in
 * VRAM, so it can be reserved until the driver takes over the display.
 * Returns the size in bytes, or 0 if the pre-OS buffer uses up most of
 * VRAM and reserving it would be pointless.
 */
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 * */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		/* derive the size from the active scanout viewport;
		 * register layout differs between DCN (Raven) and DCE.
		 */
		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}
974ebdef28eSAlex Deucher 
/*
 * Software init: set up the GFX/MM hubs, choose per-ASIC VM address-space
 * geometry, register fault/ECC interrupt sources, configure DMA masks,
 * size VRAM/GART and bring up the BO, GART and VM managers.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	/* per-ASIC VM hub count and page-table geometry */
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* UTCL2 faults are routed to the same vm_fault source */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32-bit DMA if 44-bit isn't available */
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* query XGMI topology before placing VRAM (node offset matters) */
	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}
1108e60f8db5SAlex Xie 
/*
 * Software teardown: unwind UMC RAS state, then release GEM objects,
 * the VM manager, the stolen-memory reservation (where kept) and the
 * GART table before shutting down the BO and GART managers.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/*remove fs first*/
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/*remove the IH*/
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	/* free the stolen-vga reservation only if we kept it past late_init */
	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1141e60f8db5SAlex Xie 
/*
 * Program the per-ASIC "golden" register defaults for the MMHUB and
 * ATHUB blocks.  Under SR-IOV with skip-setting support, Vega10 leaves
 * the registers to the host.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_virt_support_skip_setting(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		/* bare-metal Vega10 and Vega20 share these sequences */
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* Raven only needs the ATHUB settings */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1169e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Program the HDP golden registers, enable GART on both the GFX and MM
 * hubs, configure fault handling and flush the TLBs.  The programming
 * order below matters; do not reorder the hardware accesses.
 * Returns 0 for success, negative error code on failure.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* Raven: power up the MMHUB before touching it */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* read-modify-write of HOST_PATH_CNTL (write-back of current value) */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	/* decide whether VM faults halt the offending client by default */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
1241e60f8db5SAlex Xie 
/*
 * Hardware init: program golden registers, lock out legacy VGA access
 * when a display block is present, then enable GART.
 */
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
1262e60f8db5SAlex Xie 
1263e60f8db5SAlex Xie /**
1264e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1265e60f8db5SAlex Xie  *
1266e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1267e60f8db5SAlex Xie  *
1268e60f8db5SAlex Xie  * This disables all VM page table.
1269e60f8db5SAlex Xie  */
1270e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1271e60f8db5SAlex Xie {
1272e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
127351cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
127451cce480SLe Ma 		mmhub_v9_4_gart_disable(adev);
127551cce480SLe Ma 	else
1276e60f8db5SAlex Xie 		mmhub_v1_0_gart_disable(adev);
1277ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1278e60f8db5SAlex Xie }
1279e60f8db5SAlex Xie 
1280e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1281e60f8db5SAlex Xie {
1282e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1283e60f8db5SAlex Xie 
12845dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
12855dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
12865dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
12875dd696aeSTrigger Huang 		return 0;
12885dd696aeSTrigger Huang 	}
12895dd696aeSTrigger Huang 
1290791c4769Sxinhui pan 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1291770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1292e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1293e60f8db5SAlex Xie 
1294e60f8db5SAlex Xie 	return 0;
1295e60f8db5SAlex Xie }
1296e60f8db5SAlex Xie 
1297e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1298e60f8db5SAlex Xie {
1299e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300e60f8db5SAlex Xie 
1301f053cd47STom St Denis 	return gmc_v9_0_hw_fini(adev);
1302e60f8db5SAlex Xie }
1303e60f8db5SAlex Xie 
1304e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1305e60f8db5SAlex Xie {
1306e60f8db5SAlex Xie 	int r;
1307e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1308e60f8db5SAlex Xie 
1309e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1310e60f8db5SAlex Xie 	if (r)
1311e60f8db5SAlex Xie 		return r;
1312e60f8db5SAlex Xie 
1313620f774fSChristian König 	amdgpu_vmid_reset_all(adev);
1314e60f8db5SAlex Xie 
131532601d48SChristian König 	return 0;
1316e60f8db5SAlex Xie }
1317e60f8db5SAlex Xie 
1318e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1319e60f8db5SAlex Xie {
1320e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1321e60f8db5SAlex Xie 	return true;
1322e60f8db5SAlex Xie }
1323e60f8db5SAlex Xie 
1324e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1325e60f8db5SAlex Xie {
1326e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9.*/
1327e60f8db5SAlex Xie 	return 0;
1328e60f8db5SAlex Xie }
1329e60f8db5SAlex Xie 
1330e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1331e60f8db5SAlex Xie {
1332e60f8db5SAlex Xie 	/* XXX for emulation.*/
1333e60f8db5SAlex Xie 	return 0;
1334e60f8db5SAlex Xie }
1335e60f8db5SAlex Xie 
1336e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1337e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1338e60f8db5SAlex Xie {
1339d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1340d5583d4fSHuang Rui 
134151cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
134251cce480SLe Ma 		return 0;
134351cce480SLe Ma 
1344d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1345e60f8db5SAlex Xie }
1346e60f8db5SAlex Xie 
134713052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
134813052be5SHuang Rui {
134913052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
135013052be5SHuang Rui 
135151cce480SLe Ma 	if (adev->asic_type == CHIP_ARCTURUS)
135251cce480SLe Ma 		return;
135351cce480SLe Ma 
135413052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
135513052be5SHuang Rui }
135613052be5SHuang Rui 
1357e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1358e60f8db5SAlex Xie 					enum amd_powergating_state state)
1359e60f8db5SAlex Xie {
1360e60f8db5SAlex Xie 	return 0;
1361e60f8db5SAlex Xie }
1362e60f8db5SAlex Xie 
1363e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1364e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1365e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1366e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1367e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1368e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1369e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1370e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1371e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1372e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1373e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1374e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1375e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1376e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1377e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
137813052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1379e60f8db5SAlex Xie };
1380e60f8db5SAlex Xie 
1381e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1382e60f8db5SAlex Xie {
1383e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1384e60f8db5SAlex Xie 	.major = 9,
1385e60f8db5SAlex Xie 	.minor = 0,
1386e60f8db5SAlex Xie 	.rev = 0,
1387e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1388e60f8db5SAlex Xie };
1389