xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision f49ea9f8)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24fd5fd480SChunming Zhou #include <drm/drm_cache.h>
25e60f8db5SAlex Xie #include "amdgpu.h"
26e60f8db5SAlex Xie #include "gmc_v9_0.h"
278d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
282cddc50eSHuang Rui #include "amdgpu_gem.h"
29e60f8db5SAlex Xie 
3075199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3175199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
32cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
33135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
34135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
35fb960bd2SFeifei Xu #include "vega10_enum.h"
3665417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
376ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
38250b4228SChristian König #include "oss/osssys_4_0_offset.h"
39e60f8db5SAlex Xie 
40946a4d5bSShaoyun Liu #include "soc15.h"
41e60f8db5SAlex Xie #include "soc15_common.h"
4290c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
43e60f8db5SAlex Xie 
44e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
45e60f8db5SAlex Xie #include "mmhub_v1_0.h"
46bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
47e60f8db5SAlex Xie 
4844a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
4944a99b65SAndrey Grodzovsky 
50791c4769Sxinhui pan #include "amdgpu_ras.h"
51791c4769Sxinhui pan 
52ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
53ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
54ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
55ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
56ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
57ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
58ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
59ebdef28eSAlex Deucher 
60e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
61e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
62e60f8db5SAlex Xie 
63e60f8db5SAlex Xie static const u32 golden_settings_vega10_hdp[] =
64e60f8db5SAlex Xie {
65e60f8db5SAlex Xie 	0xf64, 0x0fffffff, 0x00000000,
66e60f8db5SAlex Xie 	0xf65, 0x0fffffff, 0x00000000,
67e60f8db5SAlex Xie 	0xf66, 0x0fffffff, 0x00000000,
68e60f8db5SAlex Xie 	0xf67, 0x0fffffff, 0x00000000,
69e60f8db5SAlex Xie 	0xf68, 0x0fffffff, 0x00000000,
70e60f8db5SAlex Xie 	0xf6a, 0x0fffffff, 0x00000000,
71e60f8db5SAlex Xie 	0xf6b, 0x0fffffff, 0x00000000,
72e60f8db5SAlex Xie 	0xf6c, 0x0fffffff, 0x00000000,
73e60f8db5SAlex Xie 	0xf6d, 0x0fffffff, 0x00000000,
74e60f8db5SAlex Xie 	0xf6e, 0x0fffffff, 0x00000000,
75e60f8db5SAlex Xie };
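
/*
 * Illustrative aside, not part of the driver: each golden-settings entry
 * above is an (offset, and_mask, or_mask) triple. The sketch below shows
 * the read-modify-write that amdgpu_device_program_register_sequence() is
 * expected to perform for each triple (the helper name is hypothetical).
 * For the HDP table above or_mask is 0, so the net effect is clearing the
 * low 28 bits of each register.
 */
static inline void example_apply_golden_regs(struct amdgpu_device *adev,
					     const u32 *regs, u32 array_size)
{
	u32 i, tmp;

	for (i = 0; i < array_size; i += 3) {
		u32 reg = regs[i + 0];
		u32 and_mask = regs[i + 1];
		u32 or_mask = regs[i + 2];

		tmp = RREG32(reg);
		tmp &= ~and_mask;		/* clear the masked bits */
		tmp |= or_mask & and_mask;	/* program the new value */
		WREG32(reg, tmp);
	}
}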
76e60f8db5SAlex Xie 
77946a4d5bSShaoyun Liu static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
785c583018SEvan Quan {
79946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
80946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
815c583018SEvan Quan };
825c583018SEvan Quan 
83946a4d5bSShaoyun Liu static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
845c583018SEvan Quan {
85946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
86946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
875c583018SEvan Quan };
885c583018SEvan Quan 
89791c4769Sxinhui pan static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
90791c4769Sxinhui pan 	(0x000143c0 + 0x00000000),
91791c4769Sxinhui pan 	(0x000143c0 + 0x00000800),
92791c4769Sxinhui pan 	(0x000143c0 + 0x00001000),
93791c4769Sxinhui pan 	(0x000143c0 + 0x00001800),
94791c4769Sxinhui pan 	(0x000543c0 + 0x00000000),
95791c4769Sxinhui pan 	(0x000543c0 + 0x00000800),
96791c4769Sxinhui pan 	(0x000543c0 + 0x00001000),
97791c4769Sxinhui pan 	(0x000543c0 + 0x00001800),
98791c4769Sxinhui pan 	(0x000943c0 + 0x00000000),
99791c4769Sxinhui pan 	(0x000943c0 + 0x00000800),
100791c4769Sxinhui pan 	(0x000943c0 + 0x00001000),
101791c4769Sxinhui pan 	(0x000943c0 + 0x00001800),
102791c4769Sxinhui pan 	(0x000d43c0 + 0x00000000),
103791c4769Sxinhui pan 	(0x000d43c0 + 0x00000800),
104791c4769Sxinhui pan 	(0x000d43c0 + 0x00001000),
105791c4769Sxinhui pan 	(0x000d43c0 + 0x00001800),
106791c4769Sxinhui pan 	(0x001143c0 + 0x00000000),
107791c4769Sxinhui pan 	(0x001143c0 + 0x00000800),
108791c4769Sxinhui pan 	(0x001143c0 + 0x00001000),
109791c4769Sxinhui pan 	(0x001143c0 + 0x00001800),
110791c4769Sxinhui pan 	(0x001543c0 + 0x00000000),
111791c4769Sxinhui pan 	(0x001543c0 + 0x00000800),
112791c4769Sxinhui pan 	(0x001543c0 + 0x00001000),
113791c4769Sxinhui pan 	(0x001543c0 + 0x00001800),
114791c4769Sxinhui pan 	(0x001943c0 + 0x00000000),
115791c4769Sxinhui pan 	(0x001943c0 + 0x00000800),
116791c4769Sxinhui pan 	(0x001943c0 + 0x00001000),
117791c4769Sxinhui pan 	(0x001943c0 + 0x00001800),
118791c4769Sxinhui pan 	(0x001d43c0 + 0x00000000),
119791c4769Sxinhui pan 	(0x001d43c0 + 0x00000800),
120791c4769Sxinhui pan 	(0x001d43c0 + 0x00001000),
121791c4769Sxinhui pan 	(0x001d43c0 + 0x00001800),
122791c4769Sxinhui pan };
123791c4769Sxinhui pan 
124791c4769Sxinhui pan static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
125791c4769Sxinhui pan 	(0x000143e0 + 0x00000000),
126791c4769Sxinhui pan 	(0x000143e0 + 0x00000800),
127791c4769Sxinhui pan 	(0x000143e0 + 0x00001000),
128791c4769Sxinhui pan 	(0x000143e0 + 0x00001800),
129791c4769Sxinhui pan 	(0x000543e0 + 0x00000000),
130791c4769Sxinhui pan 	(0x000543e0 + 0x00000800),
131791c4769Sxinhui pan 	(0x000543e0 + 0x00001000),
132791c4769Sxinhui pan 	(0x000543e0 + 0x00001800),
133791c4769Sxinhui pan 	(0x000943e0 + 0x00000000),
134791c4769Sxinhui pan 	(0x000943e0 + 0x00000800),
135791c4769Sxinhui pan 	(0x000943e0 + 0x00001000),
136791c4769Sxinhui pan 	(0x000943e0 + 0x00001800),
137791c4769Sxinhui pan 	(0x000d43e0 + 0x00000000),
138791c4769Sxinhui pan 	(0x000d43e0 + 0x00000800),
139791c4769Sxinhui pan 	(0x000d43e0 + 0x00001000),
140791c4769Sxinhui pan 	(0x000d43e0 + 0x00001800),
141791c4769Sxinhui pan 	(0x001143e0 + 0x00000000),
142791c4769Sxinhui pan 	(0x001143e0 + 0x00000800),
143791c4769Sxinhui pan 	(0x001143e0 + 0x00001000),
144791c4769Sxinhui pan 	(0x001143e0 + 0x00001800),
145791c4769Sxinhui pan 	(0x001543e0 + 0x00000000),
146791c4769Sxinhui pan 	(0x001543e0 + 0x00000800),
147791c4769Sxinhui pan 	(0x001543e0 + 0x00001000),
148791c4769Sxinhui pan 	(0x001543e0 + 0x00001800),
149791c4769Sxinhui pan 	(0x001943e0 + 0x00000000),
150791c4769Sxinhui pan 	(0x001943e0 + 0x00000800),
151791c4769Sxinhui pan 	(0x001943e0 + 0x00001000),
152791c4769Sxinhui pan 	(0x001943e0 + 0x00001800),
153791c4769Sxinhui pan 	(0x001d43e0 + 0x00000000),
154791c4769Sxinhui pan 	(0x001d43e0 + 0x00000800),
155791c4769Sxinhui pan 	(0x001d43e0 + 0x00001000),
156791c4769Sxinhui pan 	(0x001d43e0 + 0x00001800),
157791c4769Sxinhui pan };
158791c4769Sxinhui pan 
159791c4769Sxinhui pan static const uint32_t ecc_umc_mcumc_status_addrs[] = {
160791c4769Sxinhui pan 	(0x000143c2 + 0x00000000),
161791c4769Sxinhui pan 	(0x000143c2 + 0x00000800),
162791c4769Sxinhui pan 	(0x000143c2 + 0x00001000),
163791c4769Sxinhui pan 	(0x000143c2 + 0x00001800),
164791c4769Sxinhui pan 	(0x000543c2 + 0x00000000),
165791c4769Sxinhui pan 	(0x000543c2 + 0x00000800),
166791c4769Sxinhui pan 	(0x000543c2 + 0x00001000),
167791c4769Sxinhui pan 	(0x000543c2 + 0x00001800),
168791c4769Sxinhui pan 	(0x000943c2 + 0x00000000),
169791c4769Sxinhui pan 	(0x000943c2 + 0x00000800),
170791c4769Sxinhui pan 	(0x000943c2 + 0x00001000),
171791c4769Sxinhui pan 	(0x000943c2 + 0x00001800),
172791c4769Sxinhui pan 	(0x000d43c2 + 0x00000000),
173791c4769Sxinhui pan 	(0x000d43c2 + 0x00000800),
174791c4769Sxinhui pan 	(0x000d43c2 + 0x00001000),
175791c4769Sxinhui pan 	(0x000d43c2 + 0x00001800),
176791c4769Sxinhui pan 	(0x001143c2 + 0x00000000),
177791c4769Sxinhui pan 	(0x001143c2 + 0x00000800),
178791c4769Sxinhui pan 	(0x001143c2 + 0x00001000),
179791c4769Sxinhui pan 	(0x001143c2 + 0x00001800),
180791c4769Sxinhui pan 	(0x001543c2 + 0x00000000),
181791c4769Sxinhui pan 	(0x001543c2 + 0x00000800),
182791c4769Sxinhui pan 	(0x001543c2 + 0x00001000),
183791c4769Sxinhui pan 	(0x001543c2 + 0x00001800),
184791c4769Sxinhui pan 	(0x001943c2 + 0x00000000),
185791c4769Sxinhui pan 	(0x001943c2 + 0x00000800),
186791c4769Sxinhui pan 	(0x001943c2 + 0x00001000),
187791c4769Sxinhui pan 	(0x001943c2 + 0x00001800),
188791c4769Sxinhui pan 	(0x001d43c2 + 0x00000000),
189791c4769Sxinhui pan 	(0x001d43c2 + 0x00000800),
190791c4769Sxinhui pan 	(0x001d43c2 + 0x00001000),
191791c4769Sxinhui pan 	(0x001d43c2 + 0x00001800),
192791c4769Sxinhui pan };
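
/*
 * Illustrative aside, not part of the driver: the three address tables
 * above all follow one pattern -- eight UMC instances spaced 0x40000
 * apart, each with four channels spaced 0x800 apart, on top of a
 * per-register base (0x143c0 ctrl, 0x143e0 ctrl mask, 0x143c2 status).
 * A hypothetical generator for one entry:
 */
static inline uint32_t example_umc_reg_addr(uint32_t reg_base,
					    unsigned int umc_inst,
					    unsigned int channel)
{
	/* umc_inst in [0, 7], channel in [0, 3] */
	return reg_base + umc_inst * 0x40000 + channel * 0x800;
}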
193791c4769Sxinhui pan 
194791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
195791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
196791c4769Sxinhui pan 		unsigned type,
197791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
198791c4769Sxinhui pan {
199791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
200791c4769Sxinhui pan 
201791c4769Sxinhui pan 	bits = 0x7f;
202791c4769Sxinhui pan 
203791c4769Sxinhui pan 	switch (state) {
204791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
205791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
206791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
207791c4769Sxinhui pan 			tmp = RREG32(reg);
208791c4769Sxinhui pan 			tmp &= ~bits;
209791c4769Sxinhui pan 			WREG32(reg, tmp);
210791c4769Sxinhui pan 		}
211791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
212791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
213791c4769Sxinhui pan 			tmp = RREG32(reg);
214791c4769Sxinhui pan 			tmp &= ~bits;
215791c4769Sxinhui pan 			WREG32(reg, tmp);
216791c4769Sxinhui pan 		}
217791c4769Sxinhui pan 		break;
218791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
219791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
220791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
221791c4769Sxinhui pan 			tmp = RREG32(reg);
222791c4769Sxinhui pan 			tmp |= bits;
223791c4769Sxinhui pan 			WREG32(reg, tmp);
224791c4769Sxinhui pan 		}
225791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
226791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
227791c4769Sxinhui pan 			tmp = RREG32(reg);
228791c4769Sxinhui pan 			tmp |= bits;
229791c4769Sxinhui pan 			WREG32(reg, tmp);
230791c4769Sxinhui pan 		}
231791c4769Sxinhui pan 		break;
232791c4769Sxinhui pan 	default:
233791c4769Sxinhui pan 		break;
234791c4769Sxinhui pan 	}
235791c4769Sxinhui pan 
236791c4769Sxinhui pan 	return 0;
237791c4769Sxinhui pan }
238791c4769Sxinhui pan 
239791c4769Sxinhui pan static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
240791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
241791c4769Sxinhui pan {
2429b54d201SEric Huang 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
243791c4769Sxinhui pan 	amdgpu_ras_reset_gpu(adev, 0);
244791c4769Sxinhui pan 	return AMDGPU_RAS_UE;
245791c4769Sxinhui pan }
246791c4769Sxinhui pan 
247791c4769Sxinhui pan static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
248791c4769Sxinhui pan 		struct amdgpu_irq_src *source,
249791c4769Sxinhui pan 		struct amdgpu_iv_entry *entry)
250791c4769Sxinhui pan {
251791c4769Sxinhui pan 	struct ras_dispatch_if ih_data = {
252791c4769Sxinhui pan 		.head = *adev->gmc.ras_if,
253791c4769Sxinhui pan 		.entry = entry,
254791c4769Sxinhui pan 	};
255791c4769Sxinhui pan 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
256791c4769Sxinhui pan 	return 0;
257791c4769Sxinhui pan }
258791c4769Sxinhui pan 
259e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
260e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
261e60f8db5SAlex Xie 					unsigned type,
262e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
263e60f8db5SAlex Xie {
264e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
265ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
266e60f8db5SAlex Xie 
26711250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26811250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
26911250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27011250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27111250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27211250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
27311250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
27411250164SChristian König 
275e60f8db5SAlex Xie 	switch (state) {
276e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
277ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
278ae6d1416STom St Denis 			hub = &adev->vmhub[j];
279e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
280e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
281e60f8db5SAlex Xie 				tmp = RREG32(reg);
282e60f8db5SAlex Xie 				tmp &= ~bits;
283e60f8db5SAlex Xie 				WREG32(reg, tmp);
284e60f8db5SAlex Xie 			}
285e60f8db5SAlex Xie 		}
286e60f8db5SAlex Xie 		break;
287e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
288ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
289ae6d1416STom St Denis 			hub = &adev->vmhub[j];
290e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
291e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
292e60f8db5SAlex Xie 				tmp = RREG32(reg);
293e60f8db5SAlex Xie 				tmp |= bits;
294e60f8db5SAlex Xie 				WREG32(reg, tmp);
295e60f8db5SAlex Xie 			}
296e60f8db5SAlex Xie 		}
297e60f8db5SAlex Xie 		break;
297e60f8db5SAlex Xie 	default:
298e60f8db5SAlex Xie 		break;
299e60f8db5SAlex Xie 	}
300e60f8db5SAlex Xie 
301e60f8db5SAlex Xie 	return 0;
302e60f8db5SAlex Xie }
303e60f8db5SAlex Xie 
30422666cc1SChristian König /**
30522666cc1SChristian König  * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
30622666cc1SChristian König  *
30722666cc1SChristian König  * @adev: amdgpu_device pointer
30822666cc1SChristian König  *
30922666cc1SChristian König  * Returns true if the interrupt vector should be further processed.
31022666cc1SChristian König  */
31122666cc1SChristian König static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
31222666cc1SChristian König 				  struct amdgpu_iv_entry *entry,
31322666cc1SChristian König 				  uint64_t addr)
31422666cc1SChristian König {
31522666cc1SChristian König 	struct amdgpu_vm *vm;
31622666cc1SChristian König 	u64 key;
31722666cc1SChristian König 	int r;
31822666cc1SChristian König 
31922666cc1SChristian König 	/* No PASID, can't identify faulting process */
32022666cc1SChristian König 	if (!entry->pasid)
32122666cc1SChristian König 		return true;
32222666cc1SChristian König 
32322666cc1SChristian König 	/* Not a retry fault */
32422666cc1SChristian König 	if (!(entry->src_data[1] & 0x80))
32522666cc1SChristian König 		return true;
32622666cc1SChristian König 
32722666cc1SChristian König 	/* Track retry faults in per-VM fault FIFO. */
32822666cc1SChristian König 	spin_lock(&adev->vm_manager.pasid_lock);
32922666cc1SChristian König 	vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
33022666cc1SChristian König 	if (!vm) {
33122666cc1SChristian König 		/* VM not found, process it normally */
33222666cc1SChristian König 		spin_unlock(&adev->vm_manager.pasid_lock);
33322666cc1SChristian König 		return true;
33422666cc1SChristian König 	}
33522666cc1SChristian König 
33622666cc1SChristian König 	key = AMDGPU_VM_FAULT(entry->pasid, addr);
33722666cc1SChristian König 	r = amdgpu_vm_add_fault(vm->fault_hash, key);
33822666cc1SChristian König 
33922666cc1SChristian König 	/* Hash table is full or the fault is already being processed,
34022666cc1SChristian König 	 * ignore further page faults
34122666cc1SChristian König 	 */
34222666cc1SChristian König 	if (r != 0) {
34322666cc1SChristian König 		spin_unlock(&adev->vm_manager.pasid_lock);
34422666cc1SChristian König 		return false;
34522666cc1SChristian König 	}
34622666cc1SChristian König 	/* No locking required with single writer and single reader */
34722666cc1SChristian König 	r = kfifo_put(&vm->faults, key);
34822666cc1SChristian König 	if (!r) {
34922666cc1SChristian König 		/* FIFO is full. Ignore it until there is space */
35022666cc1SChristian König 		amdgpu_vm_clear_fault(vm->fault_hash, key);
35122666cc1SChristian König 		spin_unlock(&adev->vm_manager.pasid_lock);
35222666cc1SChristian König 		return false;
35322666cc1SChristian König 	}
35422666cc1SChristian König 
35522666cc1SChristian König 	spin_unlock(&adev->vm_manager.pasid_lock);
35622666cc1SChristian König 	/* It's the first fault for this address, process it normally */
35722666cc1SChristian König 	return true;
35822666cc1SChristian König }
35922666cc1SChristian König 
360e60f8db5SAlex Xie static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
361e60f8db5SAlex Xie 				struct amdgpu_irq_src *source,
362e60f8db5SAlex Xie 				struct amdgpu_iv_entry *entry)
363e60f8db5SAlex Xie {
364c4f46f22SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
365c468f9e2SChristian König 	bool retry_fault = !!(entry->src_data[1] & 0x80);
3664d6cbde3SFelix Kuehling 	uint32_t status = 0;
367e60f8db5SAlex Xie 	u64 addr;
368e60f8db5SAlex Xie 
369e60f8db5SAlex Xie 	addr = (u64)entry->src_data[0] << 12;
370e60f8db5SAlex Xie 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
371e60f8db5SAlex Xie 
37222666cc1SChristian König 	if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
37322666cc1SChristian König 		return 1; /* This also prevents sending it to KFD */
37422666cc1SChristian König 
37579a0c465SMonk Liu 	if (!amdgpu_sriov_vf(adev)) {
3765a9b8e8aSChristian König 		status = RREG32(hub->vm_l2_pro_fault_status);
3775a9b8e8aSChristian König 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
3784d6cbde3SFelix Kuehling 	}
379e60f8db5SAlex Xie 
3804d6cbde3SFelix Kuehling 	if (printk_ratelimit()) {
38105794effSShirish S 		struct amdgpu_task_info task_info;
382efaa9646SAndrey Grodzovsky 
38305794effSShirish S 		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
384efaa9646SAndrey Grodzovsky 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
385efaa9646SAndrey Grodzovsky 
3864d6cbde3SFelix Kuehling 		dev_err(adev->dev,
387c468f9e2SChristian König 			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
388c468f9e2SChristian König 			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
389c4f46f22SChristian König 			entry->vmid_src ? "mmhub" : "gfxhub",
390c468f9e2SChristian König 			retry_fault ? "retry" : "no-retry",
391c4f46f22SChristian König 			entry->src_id, entry->ring_id, entry->vmid,
392efaa9646SAndrey Grodzovsky 			entry->pasid, task_info.process_name, task_info.tgid,
393efaa9646SAndrey Grodzovsky 			task_info.task_name, task_info.pid);
3947d0aa376SAndrey Grodzovsky 		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
39579a0c465SMonk Liu 			addr, entry->client_id);
3964d6cbde3SFelix Kuehling 		if (!amdgpu_sriov_vf(adev))
3974d6cbde3SFelix Kuehling 			dev_err(adev->dev,
3984d6cbde3SFelix Kuehling 				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
3994d6cbde3SFelix Kuehling 				status);
40079a0c465SMonk Liu 	}
401e60f8db5SAlex Xie 
402e60f8db5SAlex Xie 	return 0;
403e60f8db5SAlex Xie }
404e60f8db5SAlex Xie 
405e60f8db5SAlex Xie static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
406e60f8db5SAlex Xie 	.set = gmc_v9_0_vm_fault_interrupt_state,
407e60f8db5SAlex Xie 	.process = gmc_v9_0_process_interrupt,
408e60f8db5SAlex Xie };
409e60f8db5SAlex Xie 
410791c4769Sxinhui pan 
411791c4769Sxinhui pan static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
412791c4769Sxinhui pan 	.set = gmc_v9_0_ecc_interrupt_state,
413791c4769Sxinhui pan 	.process = gmc_v9_0_process_ecc_irq,
414791c4769Sxinhui pan };
415791c4769Sxinhui pan 
416e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
417e60f8db5SAlex Xie {
418770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
419770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
420791c4769Sxinhui pan 
421791c4769Sxinhui pan 	adev->gmc.ecc_irq.num_types = 1;
422791c4769Sxinhui pan 	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
423e60f8db5SAlex Xie }
424e60f8db5SAlex Xie 
4252a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
4262a79d868SYong Zhao 					uint32_t flush_type)
42703f89febSChristian König {
42803f89febSChristian König 	u32 req = 0;
42903f89febSChristian König 
43003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
431c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
4322a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
43303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
43403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
43503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
43603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
43703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
43803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
43903f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
44003f89febSChristian König 
44103f89febSChristian König 	return req;
44203f89febSChristian König }
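
/*
 * Illustrative aside, not part of the driver: each REG_SET_FIELD() call
 * above is a masked field insert driven by the generated __SHIFT/_MASK
 * constants. A hand-expanded sketch of the operation (helper name and
 * parameters are for illustration only):
 */
static inline u32 example_set_field(u32 orig, u32 field_mask, u32 field_shift,
				    u32 field_val)
{
	/* clear the field, then OR in the shifted new value */
	return (orig & ~field_mask) | ((field_val << field_shift) & field_mask);
}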
44303f89febSChristian König 
444e60f8db5SAlex Xie /*
445e60f8db5SAlex Xie  * GART
446e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
447e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
448e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
449e60f8db5SAlex Xie  */
450e60f8db5SAlex Xie 
451e60f8db5SAlex Xie /**
4522a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
453e60f8db5SAlex Xie  *
454e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
455e60f8db5SAlex Xie  * @vmid: vm instance to flush
4562a79d868SYong Zhao  * @flush_type: the flush type
457e60f8db5SAlex Xie  *
4582a79d868SYong Zhao  * Flush the TLB for the requested page table using the given flush type.
459e60f8db5SAlex Xie  */
460132f34e4SChristian König static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
4612a79d868SYong Zhao 				uint32_t vmid, uint32_t flush_type)
462e60f8db5SAlex Xie {
463e60f8db5SAlex Xie 	const unsigned eng = 17;
464e60f8db5SAlex Xie 	unsigned i, j;
465e60f8db5SAlex Xie 
466e60f8db5SAlex Xie 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
467e60f8db5SAlex Xie 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
4682a79d868SYong Zhao 		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
469e60f8db5SAlex Xie 
47082d1a1b1SChengming Gui 		/* This is necessary for a HW workaround under SRIOV as well
47182d1a1b1SChengming Gui 		 * as GFXOFF under bare metal
47282d1a1b1SChengming Gui 		 */
47382d1a1b1SChengming Gui 		if (adev->gfx.kiq.ring.sched.ready &&
47482d1a1b1SChengming Gui 		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
47582d1a1b1SChengming Gui 		    !adev->in_gpu_reset) {
476af5fe1e9SChristian König 			uint32_t req = hub->vm_inv_eng0_req + eng;
477af5fe1e9SChristian König 			uint32_t ack = hub->vm_inv_eng0_ack + eng;
478af5fe1e9SChristian König 
479af5fe1e9SChristian König 			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
480af5fe1e9SChristian König 							   1 << vmid);
4813890d111SEmily Deng 			continue;
482fc0faf04SEmily Deng 		}
4833890d111SEmily Deng 
4843890d111SEmily Deng 		spin_lock(&adev->gmc.invalidate_lock);
485c7a7266bSXiangliang Yu 		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
486e60f8db5SAlex Xie 		for (j = 0; j < adev->usec_timeout; j++) {
487c7a7266bSXiangliang Yu 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
488396557b0SChristian König 			if (tmp & (1 << vmid))
489e60f8db5SAlex Xie 				break;
490e60f8db5SAlex Xie 			udelay(1);
491e60f8db5SAlex Xie 		}
4923890d111SEmily Deng 		spin_unlock(&adev->gmc.invalidate_lock);
493396557b0SChristian König 		if (j < adev->usec_timeout)
494e60f8db5SAlex Xie 			continue;
495396557b0SChristian König 
496e60f8db5SAlex Xie 		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
497e60f8db5SAlex Xie 	}
498e60f8db5SAlex Xie }
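
/*
 * Illustrative aside, not part of the driver: callers reach this function
 * through the gmc_funcs table hooked up below, e.g. (wrapper name as in
 * amdgpu_gmc.h of this vintage, shown here as an assumption):
 *
 *	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 *
 * which would flush VMID 0 on both hubs with flush_type 0, the type this
 * file uses everywhere (see gmc_v9_0_emit_flush_gpu_tlb() below).
 */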
499e60f8db5SAlex Xie 
5009096d6e5SChristian König static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
501c633c00bSChristian König 					    unsigned vmid, uint64_t pd_addr)
5029096d6e5SChristian König {
503250b4228SChristian König 	struct amdgpu_device *adev = ring->adev;
504250b4228SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
5052a79d868SYong Zhao 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
5069096d6e5SChristian König 	unsigned eng = ring->vm_inv_eng;
5079096d6e5SChristian König 
5089096d6e5SChristian König 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
5099096d6e5SChristian König 			      lower_32_bits(pd_addr));
5109096d6e5SChristian König 
5119096d6e5SChristian König 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
5129096d6e5SChristian König 			      upper_32_bits(pd_addr));
5139096d6e5SChristian König 
514f8bc9037SAlex Deucher 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
515f8bc9037SAlex Deucher 					    hub->vm_inv_eng0_ack + eng,
516f8bc9037SAlex Deucher 					    req, 1 << vmid);
517f732b6b3SChristian König 
5189096d6e5SChristian König 	return pd_addr;
5199096d6e5SChristian König }
5209096d6e5SChristian König 
521c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
522c633c00bSChristian König 					unsigned pasid)
523c633c00bSChristian König {
524c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
525c633c00bSChristian König 	uint32_t reg;
526c633c00bSChristian König 
527c633c00bSChristian König 	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
528c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
529c633c00bSChristian König 	else
530c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
531c633c00bSChristian König 
532c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
533c633c00bSChristian König }
534c633c00bSChristian König 
535e60f8db5SAlex Xie /*
536e60f8db5SAlex Xie  * PTE format on VEGA 10:
537e60f8db5SAlex Xie  * 63:59 reserved
538e60f8db5SAlex Xie  * 58:57 mtype
539e60f8db5SAlex Xie  * 56 F
540e60f8db5SAlex Xie  * 55 L
541e60f8db5SAlex Xie  * 54 P
542e60f8db5SAlex Xie  * 53 SW
543e60f8db5SAlex Xie  * 52 T
544e60f8db5SAlex Xie  * 50:48 reserved
545e60f8db5SAlex Xie  * 47:12 4k physical page base address
546e60f8db5SAlex Xie  * 11:7 fragment
547e60f8db5SAlex Xie  * 6 write
548e60f8db5SAlex Xie  * 5 read
549e60f8db5SAlex Xie  * 4 exe
550e60f8db5SAlex Xie  * 3 Z
551e60f8db5SAlex Xie  * 2 snooped
552e60f8db5SAlex Xie  * 1 system
553e60f8db5SAlex Xie  * 0 valid
554e60f8db5SAlex Xie  *
555e60f8db5SAlex Xie  * PDE format on VEGA 10:
556e60f8db5SAlex Xie  * 63:59 block fragment size
557e60f8db5SAlex Xie  * 58:55 reserved
558e60f8db5SAlex Xie  * 54 P
559e60f8db5SAlex Xie  * 53:48 reserved
560e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
561e60f8db5SAlex Xie  * 5:3 reserved
562e60f8db5SAlex Xie  * 2 C
563e60f8db5SAlex Xie  * 1 system
564e60f8db5SAlex Xie  * 0 valid
565e60f8db5SAlex Xie  */
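
/*
 * Illustrative aside, not part of the driver: decoding the PTE layout
 * documented above with plain shifts and masks (helper names are
 * hypothetical; the driver itself uses the AMDGPU_PTE_* macros).
 */
static inline u64 example_pte_base_addr(u64 pte)
{
	return pte & 0x0000fffffffff000ULL;	/* bits 47:12, 4K page base */
}

static inline unsigned int example_pte_fragment(u64 pte)
{
	return (pte >> 7) & 0x1f;		/* bits 11:7, fragment */
}

static inline bool example_pte_valid(u64 pte)
{
	return pte & 0x1ULL;			/* bit 0, valid */
}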
566e60f8db5SAlex Xie 
567e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
568e60f8db5SAlex Xie 						uint32_t flags)
569e60f8db5SAlex Xie 
570e60f8db5SAlex Xie {
571e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
572e60f8db5SAlex Xie 
573e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
574e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
575e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
576e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
577e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
578e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
579e60f8db5SAlex Xie 
580e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
581e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
582e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
583e60f8db5SAlex Xie 		break;
584e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
585e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
586e60f8db5SAlex Xie 		break;
587e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
588e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
589e60f8db5SAlex Xie 		break;
590e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
591e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
592e60f8db5SAlex Xie 		break;
593e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
594e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
595e60f8db5SAlex Xie 		break;
596e60f8db5SAlex Xie 	default:
597e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
598e60f8db5SAlex Xie 		break;
599e60f8db5SAlex Xie 	}
600e60f8db5SAlex Xie 
601e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
602e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
603e60f8db5SAlex Xie 
604e60f8db5SAlex Xie 	return pte_flag;
605e60f8db5SAlex Xie }
606e60f8db5SAlex Xie 
6073de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
6083de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
609f75e237cSChristian König {
610bbc9fb10SChristian König 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
6113de676d8SChristian König 		*addr = adev->vm_manager.vram_base_offset + *addr -
612770d13b1SChristian König 			adev->gmc.vram_start;
6133de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
6146a42fd6fSChristian König 
615770d13b1SChristian König 	if (!adev->gmc.translate_further)
6166a42fd6fSChristian König 		return;
6176a42fd6fSChristian König 
6186a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
6196a42fd6fSChristian König 		/* Set the block fragment size */
6206a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
6216a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
6226a42fd6fSChristian König 
6236a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
6246a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
6256a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
6266a42fd6fSChristian König 		else
6276a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
6286a42fd6fSChristian König 	}
629f75e237cSChristian König }
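
/*
 * Illustrative aside, not part of the driver: AMDGPU_PDE_BFS(0x9) above
 * encodes a block fragment size of 9, i.e. 2^9 = 512 contiguous 4K pages,
 * so a fragment covers 2^(9 + 12) bytes = 2MB of address space.
 */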
630f75e237cSChristian König 
631132f34e4SChristian König static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
632132f34e4SChristian König 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
6339096d6e5SChristian König 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
634c633c00bSChristian König 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
635b1166325SChristian König 	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
636b1166325SChristian König 	.get_vm_pde = gmc_v9_0_get_vm_pde
637e60f8db5SAlex Xie };
638e60f8db5SAlex Xie 
639132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
640e60f8db5SAlex Xie {
641132f34e4SChristian König 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
642e60f8db5SAlex Xie }
643e60f8db5SAlex Xie 
644e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
645e60f8db5SAlex Xie {
646e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
647e60f8db5SAlex Xie 
648132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
649e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
650e60f8db5SAlex Xie 
651770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
652770d13b1SChristian König 	adev->gmc.shared_aperture_end =
653770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
654bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
655770d13b1SChristian König 	adev->gmc.private_aperture_end =
656770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
657a7ea6548SAlex Deucher 
658e60f8db5SAlex Xie 	return 0;
659e60f8db5SAlex Xie }
660e60f8db5SAlex Xie 
661cd2b5623SAlex Deucher static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
662cd2b5623SAlex Deucher {
664cd2b5623SAlex Deucher 	/*
665cd2b5623SAlex Deucher 	 * TODO:
666cd2b5623SAlex Deucher 	 * Currently there is a bug where some memory client outside
667cd2b5623SAlex Deucher 	 * of the driver writes to first 8M of VRAM on S3 resume,
668cd2b5623SAlex Deucher 	 * this overwrites GART, which by default gets placed in the first 8M,
669cd2b5623SAlex Deucher 	 * and causes VM_FAULTS once GTT is accessed.
670cd2b5623SAlex Deucher 	 * Keep the stolen memory reservation while this is not solved.
671cd2b5623SAlex Deucher 	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
672cd2b5623SAlex Deucher 	 */
673cd2b5623SAlex Deucher 	switch (adev->asic_type) {
67495010ba7SAlex Deucher 	case CHIP_VEGA10:
6756abc0c8fSAlex Deucher 		return true;
6766abc0c8fSAlex Deucher 	case CHIP_RAVEN:
6776abc0c8fSAlex Deucher 	case CHIP_VEGA12:
678cd2b5623SAlex Deucher 	case CHIP_VEGA20:
679cd2b5623SAlex Deucher 	default:
6806abc0c8fSAlex Deucher 		return false;
681cd2b5623SAlex Deucher 	}
682cd2b5623SAlex Deucher }
683cd2b5623SAlex Deucher 
684c713a461SEvan Quan static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
685c713a461SEvan Quan {
686c713a461SEvan Quan 	struct amdgpu_ring *ring;
687c713a461SEvan Quan 	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
688c713a461SEvan Quan 		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
689c713a461SEvan Quan 	unsigned i;
690c713a461SEvan Quan 	unsigned vmhub, inv_eng;
691c713a461SEvan Quan 
692c713a461SEvan Quan 	for (i = 0; i < adev->num_rings; ++i) {
693c713a461SEvan Quan 		ring = adev->rings[i];
694c713a461SEvan Quan 		vmhub = ring->funcs->vmhub;
695c713a461SEvan Quan 
696c713a461SEvan Quan 		inv_eng = ffs(vm_inv_engs[vmhub]);
697c713a461SEvan Quan 		if (!inv_eng) {
698c713a461SEvan Quan 			dev_err(adev->dev, "no VM inv eng for ring %s\n",
699c713a461SEvan Quan 				ring->name);
700c713a461SEvan Quan 			return -EINVAL;
701c713a461SEvan Quan 		}
702c713a461SEvan Quan 
703c713a461SEvan Quan 		ring->vm_inv_eng = inv_eng - 1;
704c713a461SEvan Quan 		change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
705c713a461SEvan Quan 
706c713a461SEvan Quan 		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
707c713a461SEvan Quan 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
708c713a461SEvan Quan 	}
709c713a461SEvan Quan 
710c713a461SEvan Quan 	return 0;
711c713a461SEvan Quan }
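
/*
 * Illustrative aside, not part of the driver: the allocation above relies
 * on ffs() returning the 1-based index of the lowest set bit (0 when no
 * bit is set), so "inv_eng - 1" is the engine number and flipping that
 * known-set bit with change_bit() marks the engine as taken. A
 * hypothetical standalone version of the same idea:
 */
static inline int example_take_lowest_free(unsigned int *bitmap)
{
	int bit = ffs(*bitmap);		/* 1-based index, 0 if none free */

	if (!bit)
		return -EINVAL;		/* nothing left to hand out */
	*bitmap &= ~(1u << (bit - 1));	/* mark it allocated */
	return bit - 1;			/* engine number */
}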
712c713a461SEvan Quan 
713791c4769Sxinhui pan static int gmc_v9_0_ecc_late_init(void *handle)
714791c4769Sxinhui pan {
715791c4769Sxinhui pan 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
716791c4769Sxinhui pan 	struct ras_common_if **ras_if = &adev->gmc.ras_if;
717791c4769Sxinhui pan 	struct ras_ih_if ih_info = {
718791c4769Sxinhui pan 		.cb = gmc_v9_0_process_ras_data_cb,
719791c4769Sxinhui pan 	};
720791c4769Sxinhui pan 	struct ras_fs_if fs_info = {
721791c4769Sxinhui pan 		.sysfs_name = "umc_err_count",
722791c4769Sxinhui pan 		.debugfs_name = "umc_err_inject",
723791c4769Sxinhui pan 	};
724791c4769Sxinhui pan 	struct ras_common_if ras_block = {
725791c4769Sxinhui pan 		.block = AMDGPU_RAS_BLOCK__UMC,
726791c4769Sxinhui pan 		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
727791c4769Sxinhui pan 		.sub_block_index = 0,
728791c4769Sxinhui pan 		.name = "umc",
729791c4769Sxinhui pan 	};
730791c4769Sxinhui pan 	int r;
731791c4769Sxinhui pan 
732791c4769Sxinhui pan 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
733791c4769Sxinhui pan 		amdgpu_ras_feature_enable(adev, &ras_block, 0);
734791c4769Sxinhui pan 		return 0;
735791c4769Sxinhui pan 	}
736acbbee01Sxinhui pan 	/* handle resume path. */
737acbbee01Sxinhui pan 	if (*ras_if)
738acbbee01Sxinhui pan 		goto resume;
739791c4769Sxinhui pan 
740791c4769Sxinhui pan 	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
741791c4769Sxinhui pan 	if (!*ras_if)
742791c4769Sxinhui pan 		return -ENOMEM;
743791c4769Sxinhui pan 
744791c4769Sxinhui pan 	**ras_if = ras_block;
745791c4769Sxinhui pan 
746791c4769Sxinhui pan 	r = amdgpu_ras_feature_enable(adev, *ras_if, 1);
747791c4769Sxinhui pan 	if (r)
748791c4769Sxinhui pan 		goto feature;
749791c4769Sxinhui pan 
750791c4769Sxinhui pan 	ih_info.head = **ras_if;
751791c4769Sxinhui pan 	fs_info.head = **ras_if;
752791c4769Sxinhui pan 
753791c4769Sxinhui pan 	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
754791c4769Sxinhui pan 	if (r)
755791c4769Sxinhui pan 		goto interrupt;
756791c4769Sxinhui pan 
757791c4769Sxinhui pan 	r = amdgpu_ras_debugfs_create(adev, &fs_info);
758791c4769Sxinhui pan 	if (r)
759791c4769Sxinhui pan 		goto debugfs;
760791c4769Sxinhui pan 
761791c4769Sxinhui pan 	r = amdgpu_ras_sysfs_create(adev, &fs_info);
762791c4769Sxinhui pan 	if (r)
763791c4769Sxinhui pan 		goto sysfs;
764acbbee01Sxinhui pan resume:
765791c4769Sxinhui pan 	r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
766791c4769Sxinhui pan 	if (r)
767791c4769Sxinhui pan 		goto irq;
768791c4769Sxinhui pan 
769791c4769Sxinhui pan 	return 0;
770791c4769Sxinhui pan irq:
771791c4769Sxinhui pan 	amdgpu_ras_sysfs_remove(adev, *ras_if);
772791c4769Sxinhui pan sysfs:
773791c4769Sxinhui pan 	amdgpu_ras_debugfs_remove(adev, *ras_if);
774791c4769Sxinhui pan debugfs:
775791c4769Sxinhui pan 	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
776791c4769Sxinhui pan interrupt:
777791c4769Sxinhui pan 	amdgpu_ras_feature_enable(adev, *ras_if, 0);
778791c4769Sxinhui pan feature:
779791c4769Sxinhui pan 	kfree(*ras_if);
780791c4769Sxinhui pan 	*ras_if = NULL;
781791c4769Sxinhui pan 	return -EINVAL;
782791c4769Sxinhui pan }
783791c4769Sxinhui pan 
784791c4769Sxinhui pan 
785e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
786e60f8db5SAlex Xie {
787e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
788f49ea9f8SHawking Zhang 	int r;
7894789c463SChristian König 
790cd2b5623SAlex Deucher 	if (!gmc_v9_0_keep_stolen_memory(adev))
791cd2b5623SAlex Deucher 		amdgpu_bo_late_init(adev);
7926f752ec2SAndrey Grodzovsky 
793c713a461SEvan Quan 	r = gmc_v9_0_allocate_vm_inv_eng(adev);
794c713a461SEvan Quan 	if (r)
795c713a461SEvan Quan 		return r;
796f49ea9f8SHawking Zhang 	/* Check if ecc is available */
797f49ea9f8SHawking Zhang 	if (!amdgpu_sriov_vf(adev)) {
798f49ea9f8SHawking Zhang 		switch (adev->asic_type) {
799f49ea9f8SHawking Zhang 		case CHIP_VEGA10:
800f49ea9f8SHawking Zhang 		case CHIP_VEGA20:
801f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
802f49ea9f8SHawking Zhang 			if (!r) {
80302bab923SDavid Panariti 				DRM_INFO("ECC is not present.\n");
804f49ea9f8SHawking Zhang 				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
805e1d1a772SAlex Deucher 					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
80602bab923SDavid Panariti 			} else {
807f49ea9f8SHawking Zhang 				DRM_INFO("ECC is active.\n");
808f49ea9f8SHawking Zhang 			}
809f49ea9f8SHawking Zhang 
810f49ea9f8SHawking Zhang 			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
811f49ea9f8SHawking Zhang 			if (!r) {
812f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is not present.\n");
813f49ea9f8SHawking Zhang 			} else {
814f49ea9f8SHawking Zhang 				DRM_INFO("SRAM ECC is active.\n");
815f49ea9f8SHawking Zhang 			}
816f49ea9f8SHawking Zhang 			break;
817f49ea9f8SHawking Zhang 		default:
818f49ea9f8SHawking Zhang 			break;
81902bab923SDavid Panariti 		}
8205ba4fa35SAlex Deucher 	}
82102bab923SDavid Panariti 
822791c4769Sxinhui pan 	r = gmc_v9_0_ecc_late_init(handle);
823791c4769Sxinhui pan 	if (r)
824791c4769Sxinhui pan 		return r;
825791c4769Sxinhui pan 
826770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
827e60f8db5SAlex Xie }
828e60f8db5SAlex Xie 
829e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
830770d13b1SChristian König 					struct amdgpu_gmc *mc)
831e60f8db5SAlex Xie {
832eeb2487dSMonk Liu 	u64 base = 0;
832eeb2487dSMonk Liu 
833eeb2487dSMonk Liu 	if (!amdgpu_sriov_vf(adev))
834eeb2487dSMonk Liu 		base = mmhub_v1_0_get_fb_location(adev);
8356fdd68b1SAlex Deucher 	/* add the xgmi offset of the physical node */
8366fdd68b1SAlex Deucher 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
83783afe835SOak Zeng 	amdgpu_gmc_vram_location(adev, mc, base);
838961c75cfSChristian König 	amdgpu_gmc_gart_location(adev, mc);
839c3e1b43cSChristian König 	if (!amdgpu_sriov_vf(adev))
840c3e1b43cSChristian König 		amdgpu_gmc_agp_location(adev, mc);
841bc099ee9SChunming Zhou 	/* base offset of vram pages */
842bc099ee9SChunming Zhou 	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
8436fdd68b1SAlex Deucher 
8446fdd68b1SAlex Deucher 	/* XXX: add the xgmi offset of the physical node? */
8456fdd68b1SAlex Deucher 	adev->vm_manager.vram_base_offset +=
8466fdd68b1SAlex Deucher 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
847e60f8db5SAlex Xie }
848e60f8db5SAlex Xie 
849e60f8db5SAlex Xie /**
850e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
851e60f8db5SAlex Xie  *
852e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
853e60f8db5SAlex Xie  *
854e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
855e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
856e60f8db5SAlex Xie  * Returns 0 for success.
857e60f8db5SAlex Xie  */
858e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
859e60f8db5SAlex Xie {
860e60f8db5SAlex Xie 	int chansize, numchan;
861d6895ad3SChristian König 	int r;
862e60f8db5SAlex Xie 
8633d918c0eSShaoyun Liu 	if (amdgpu_emu_mode != 1)
864770d13b1SChristian König 		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
865770d13b1SChristian König 	if (!adev->gmc.vram_width) {
866e60f8db5SAlex Xie 		/* hbm memory channel size */
867585b7f16STom St Denis 		if (adev->flags & AMD_IS_APU)
868585b7f16STom St Denis 			chansize = 64;
869585b7f16STom St Denis 		else
870e60f8db5SAlex Xie 			chansize = 128;
871e60f8db5SAlex Xie 
872070706c0SHawking Zhang 		numchan = adev->df_funcs->get_hbm_channel_number(adev);
873770d13b1SChristian König 		adev->gmc.vram_width = numchan * chansize;
8748d6a5230SAlex Deucher 	}
875e60f8db5SAlex Xie 
876e60f8db5SAlex Xie 	/* size in MB on si */
877770d13b1SChristian König 	adev->gmc.mc_vram_size =
878bf383fb6SAlex Deucher 		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
879770d13b1SChristian König 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
880d6895ad3SChristian König 
881d6895ad3SChristian König 	if (!(adev->flags & AMD_IS_APU)) {
882d6895ad3SChristian König 		r = amdgpu_device_resize_fb_bar(adev);
883d6895ad3SChristian König 		if (r)
884d6895ad3SChristian König 			return r;
885d6895ad3SChristian König 	}
886770d13b1SChristian König 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
887770d13b1SChristian König 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
888e60f8db5SAlex Xie 
889156a81beSChunming Zhou #ifdef CONFIG_X86_64
890156a81beSChunming Zhou 	if (adev->flags & AMD_IS_APU) {
891156a81beSChunming Zhou 		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
892156a81beSChunming Zhou 		adev->gmc.aper_size = adev->gmc.real_vram_size;
893156a81beSChunming Zhou 	}
894156a81beSChunming Zhou #endif
895e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
896770d13b1SChristian König 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
897770d13b1SChristian König 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
898770d13b1SChristian König 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
899e60f8db5SAlex Xie 
900c3db7b5aSAlex Deucher 	/* set the gart size */
901c3db7b5aSAlex Deucher 	if (amdgpu_gart_size == -1) {
902c3db7b5aSAlex Deucher 		switch (adev->asic_type) {
903c3db7b5aSAlex Deucher 		case CHIP_VEGA10:  /* all engines support GPUVM */
904273a14cdSAlex Deucher 		case CHIP_VEGA12:  /* all engines support GPUVM */
905d96b428cSFeifei Xu 		case CHIP_VEGA20:
906c3db7b5aSAlex Deucher 		default:
907fe19b862SMonk Liu 			adev->gmc.gart_size = 512ULL << 20;
908c3db7b5aSAlex Deucher 			break;
909c3db7b5aSAlex Deucher 		case CHIP_RAVEN:   /* DCE SG support */
910770d13b1SChristian König 			adev->gmc.gart_size = 1024ULL << 20;
911c3db7b5aSAlex Deucher 			break;
912c3db7b5aSAlex Deucher 		}
913c3db7b5aSAlex Deucher 	} else {
914770d13b1SChristian König 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
915c3db7b5aSAlex Deucher 	}
916c3db7b5aSAlex Deucher 
917770d13b1SChristian König 	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
918e60f8db5SAlex Xie 
919e60f8db5SAlex Xie 	return 0;
920e60f8db5SAlex Xie }
921e60f8db5SAlex Xie 
922e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
923e60f8db5SAlex Xie {
924e60f8db5SAlex Xie 	int r;
925e60f8db5SAlex Xie 
9261123b989SChristian König 	if (adev->gart.bo) {
927e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
928e60f8db5SAlex Xie 		return 0;
929e60f8db5SAlex Xie 	}
930e60f8db5SAlex Xie 	/* Initialize common gart structure */
931e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
932e60f8db5SAlex Xie 	if (r)
933e60f8db5SAlex Xie 		return r;
934e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
935e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
936e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
937e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
938e60f8db5SAlex Xie }
939e60f8db5SAlex Xie 
940ebdef28eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
941ebdef28eSAlex Deucher {
942ebdef28eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
943ebdef28eSAlex Deucher 	unsigned size;
944ebdef28eSAlex Deucher 
9456f752ec2SAndrey Grodzovsky 	/*
9466f752ec2SAndrey Grodzovsky 	 * TODO: Remove once GART corruption is resolved.
9476f752ec2SAndrey Grodzovsky 	 * Check related code in gmc_v9_0_sw_fini.
9486f752ec2SAndrey Grodzovsky 	 */
949cd2b5623SAlex Deucher 	if (gmc_v9_0_keep_stolen_memory(adev))
950cd2b5623SAlex Deucher 		return 9 * 1024 * 1024;
9516f752ec2SAndrey Grodzovsky 
952ebdef28eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
953ebdef28eSAlex Deucher 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
954ebdef28eSAlex Deucher 	} else {
955ebdef28eSAlex Deucher 		u32 viewport;
956ebdef28eSAlex Deucher 
957ebdef28eSAlex Deucher 		switch (adev->asic_type) {
958ebdef28eSAlex Deucher 		case CHIP_RAVEN:
959ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
960ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
961ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
962ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport,
963ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
964ebdef28eSAlex Deucher 				4);
965ebdef28eSAlex Deucher 			break;
966ebdef28eSAlex Deucher 		case CHIP_VEGA10:
967ebdef28eSAlex Deucher 		case CHIP_VEGA12:
968cd2b5623SAlex Deucher 		case CHIP_VEGA20:
969ebdef28eSAlex Deucher 		default:
970ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
971ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
972ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
973ebdef28eSAlex Deucher 				4);
974ebdef28eSAlex Deucher 			break;
975ebdef28eSAlex Deucher 		}
976ebdef28eSAlex Deucher 	}
977ebdef28eSAlex Deucher 	/* return 0 if the pre-OS buffer uses up most of vram */
978ebdef28eSAlex Deucher 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
979ebdef28eSAlex Deucher 		return 0;
9806f752ec2SAndrey Grodzovsky 
981ebdef28eSAlex Deucher 	return size;
982ebdef28eSAlex Deucher }
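
/*
 * Illustrative aside, not part of the driver: the viewport-based size above
 * is width * height * 4 bytes (32bpp scanout). A 1920x1080 pre-OS console,
 * for example, needs 1920 * 1080 * 4 = 8294400 bytes (~7.9MB), just under
 * the 9MB reserved when the stolen-memory workaround is active.
 */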
983ebdef28eSAlex Deucher 
984e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
985e60f8db5SAlex Xie {
986e60f8db5SAlex Xie 	int r;
987e60f8db5SAlex Xie 	int dma_bits;
988e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
989e60f8db5SAlex Xie 
9900c8c0847SHuang Rui 	gfxhub_v1_0_init(adev);
99177f6c763SHuang Rui 	mmhub_v1_0_init(adev);
9920c8c0847SHuang Rui 
993770d13b1SChristian König 	spin_lock_init(&adev->gmc.invalidate_lock);
994e60f8db5SAlex Xie 
9951e09b053SHawking Zhang 	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
996fd66560bSHawking Zhang 	switch (adev->asic_type) {
997fd66560bSHawking Zhang 	case CHIP_RAVEN:
9986a42fd6fSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
999f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
10006a42fd6fSChristian König 		} else {
10016a42fd6fSChristian König 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
10026a42fd6fSChristian König 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1003770d13b1SChristian König 			adev->gmc.translate_further =
10046a42fd6fSChristian König 				adev->vm_manager.num_level > 1;
10056a42fd6fSChristian König 		}
1006fd66560bSHawking Zhang 		break;
1007fd66560bSHawking Zhang 	case CHIP_VEGA10:
1008273a14cdSAlex Deucher 	case CHIP_VEGA12:
1009d96b428cSFeifei Xu 	case CHIP_VEGA20:
101036b32a68SZhang, Jerry 		/*
101136b32a68SZhang, Jerry 		 * To fulfill 4-level page support, use a VM size of
101236b32a68SZhang, Jerry 		 * 256TB (48 bit), the maximum for Vega10, with a
101336b32a68SZhang, Jerry 		 * block size of 512 (9 bit).
101436b32a68SZhang, Jerry 		 */
1015cdba61daSwentalou 		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
1016cdba61daSwentalou 		if (amdgpu_sriov_vf(adev))
1017cdba61daSwentalou 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1018cdba61daSwentalou 		else
1019f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1020fd66560bSHawking Zhang 		break;
1021fd66560bSHawking Zhang 	default:
1022fd66560bSHawking Zhang 		break;
1023fd66560bSHawking Zhang 	}
1024fd66560bSHawking Zhang 
1025e60f8db5SAlex Xie 	/* This interrupt is VMC page fault. */
102644a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1027770d13b1SChristian König 				&adev->gmc.vm_fault);
102830da7bb1SChristian König 	if (r)
102930da7bb1SChristian König 		return r;
103030da7bb1SChristian König 
103144a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1032770d13b1SChristian König 				&adev->gmc.vm_fault);
1033e60f8db5SAlex Xie 
1034e60f8db5SAlex Xie 	if (r)
1035e60f8db5SAlex Xie 		return r;
1036e60f8db5SAlex Xie 
1037791c4769Sxinhui pan 	/* interrupt sent to DF. */
1038791c4769Sxinhui pan 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1039791c4769Sxinhui pan 			&adev->gmc.ecc_irq);
1040791c4769Sxinhui pan 	if (r)
1041791c4769Sxinhui pan 		return r;
1042791c4769Sxinhui pan 
1043e60f8db5SAlex Xie 	/* Set the internal MC address mask
1044e60f8db5SAlex Xie 	 * This is the max address of the GPU's
1045e60f8db5SAlex Xie 	 * internal address space.
1046e60f8db5SAlex Xie 	 */
1047770d13b1SChristian König 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1048e60f8db5SAlex Xie 
1049e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
1050e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
1051e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
1052e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
1053e60f8db5SAlex Xie 	 */
1054e60f8db5SAlex Xie 	adev->need_dma32 = false;
1055e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
1056e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1057e60f8db5SAlex Xie 	if (r) {
1058e60f8db5SAlex Xie 		adev->need_dma32 = true;
1059e60f8db5SAlex Xie 		dma_bits = 32;
1060e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1061e60f8db5SAlex Xie 	}
1062e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
1063e60f8db5SAlex Xie 	if (r) {
1064e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
1065e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
1066e60f8db5SAlex Xie 	}
1067fd5fd480SChunming Zhou 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
1068e60f8db5SAlex Xie 
106947622ba0SAlex Deucher 	if (adev->gmc.xgmi.supported) {
1070bf0a60b7SAlex Deucher 		r = gfxhub_v1_1_get_xgmi_info(adev);
1071bf0a60b7SAlex Deucher 		if (r)
1072bf0a60b7SAlex Deucher 			return r;
1073bf0a60b7SAlex Deucher 	}
1074bf0a60b7SAlex Deucher 
1075e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
1076e60f8db5SAlex Xie 	if (r)
1077e60f8db5SAlex Xie 		return r;
1078e60f8db5SAlex Xie 
1079ebdef28eSAlex Deucher 	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
1080ebdef28eSAlex Deucher 
1081e60f8db5SAlex Xie 	/* Memory manager */
1082e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
1083e60f8db5SAlex Xie 	if (r)
1084e60f8db5SAlex Xie 		return r;
1085e60f8db5SAlex Xie 
1086e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
1087e60f8db5SAlex Xie 	if (r)
1088e60f8db5SAlex Xie 		return r;
1089e60f8db5SAlex Xie 
109005ec3edaSChristian König 	/*
109105ec3edaSChristian König 	 * number of VMs
109205ec3edaSChristian König 	 * VMID 0 is reserved for System
109305ec3edaSChristian König 	 * amdgpu graphics/compute will use VMIDs 1-7
109405ec3edaSChristian König 	 * amdkfd will use VMIDs 8-15
109505ec3edaSChristian König 	 */
109605ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
109705ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
109805ec3edaSChristian König 
109905ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
110005ec3edaSChristian König 
110105ec3edaSChristian König 	return 0;
1102e60f8db5SAlex Xie }
1103e60f8db5SAlex Xie 
1104e60f8db5SAlex Xie static int gmc_v9_0_sw_fini(void *handle)
1105e60f8db5SAlex Xie {
1106e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1107e60f8db5SAlex Xie 
1108791c4769Sxinhui pan 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
1109791c4769Sxinhui pan 			adev->gmc.ras_if) {
1110791c4769Sxinhui pan 		struct ras_common_if *ras_if = adev->gmc.ras_if;
1111791c4769Sxinhui pan 		struct ras_ih_if ih_info = {
1112791c4769Sxinhui pan 			.head = *ras_if,
1113791c4769Sxinhui pan 		};
1114791c4769Sxinhui pan 
1115791c4769Sxinhui pan 		/* remove the debugfs/sysfs nodes first */
1116791c4769Sxinhui pan 		amdgpu_ras_debugfs_remove(adev, ras_if);
1117791c4769Sxinhui pan 		amdgpu_ras_sysfs_remove(adev, ras_if);
1118791c4769Sxinhui pan 		/* then remove the interrupt handler */
1119791c4769Sxinhui pan 		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
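		/* disable the RAS feature itself before freeing its node */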
1120791c4769Sxinhui pan 		amdgpu_ras_feature_enable(adev, ras_if, 0);
1121791c4769Sxinhui pan 		kfree(ras_if);
1122791c4769Sxinhui pan 	}
1123791c4769Sxinhui pan 
1124f59548c8SMonk Liu 	amdgpu_gem_force_release(adev);
1125e60f8db5SAlex Xie 	amdgpu_vm_manager_fini(adev);
11266f752ec2SAndrey Grodzovsky 
1127cd2b5623SAlex Deucher 	if (gmc_v9_0_keep_stolen_memory(adev))
11286f752ec2SAndrey Grodzovsky 		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
11296f752ec2SAndrey Grodzovsky 
1130a3d9103eSAndrey Grodzovsky 	amdgpu_gart_table_vram_free(adev);
1131e60f8db5SAlex Xie 	amdgpu_bo_fini(adev);
1132a3d9103eSAndrey Grodzovsky 	amdgpu_gart_fini(adev);
1133e60f8db5SAlex Xie 
1134e60f8db5SAlex Xie 	return 0;
1135e60f8db5SAlex Xie }
1136e60f8db5SAlex Xie 
1137e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1138e60f8db5SAlex Xie {
1140e60f8db5SAlex Xie 	switch (adev->asic_type) {
1141e60f8db5SAlex Xie 	case CHIP_VEGA10:
1142d96b428cSFeifei Xu 	case CHIP_VEGA20:
1143946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
11445c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
1145c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1146946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
11475c583018SEvan Quan 						golden_settings_athub_1_0_0,
1148c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
1149e60f8db5SAlex Xie 		break;
1150273a14cdSAlex Deucher 	case CHIP_VEGA12:
1151273a14cdSAlex Deucher 		break;
1152e4f3abaaSChunming Zhou 	case CHIP_RAVEN:
1153946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
11545c583018SEvan Quan 						golden_settings_athub_1_0_0,
1155c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
1156e4f3abaaSChunming Zhou 		break;
1157e60f8db5SAlex Xie 	default:
1158e60f8db5SAlex Xie 		break;
1159e60f8db5SAlex Xie 	}
1160e60f8db5SAlex Xie }
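
/*
 * Conceptual sketch of the read-modify-write pass a golden-settings
 * helper such as soc15_program_register_sequence() performs.  Field
 * names follow the soc15_reg_golden layout; the offset lookup and the
 * helper's special cases are simplified, and example_program_golden
 * is a hypothetical name.
 */
static void example_program_golden(struct amdgpu_device *adev,
				   const struct soc15_reg_golden *regs,
				   u32 count)
{
	u32 i, reg, tmp;

	for (i = 0; i < count; i++) {
		/* resolve the per-IP, per-instance register offset */
		reg = adev->reg_offset[regs[i].hwip][regs[i].instance]
			[regs[i].segment] + regs[i].reg;
		tmp = RREG32(reg);		/* read the current value */
		tmp &= ~regs[i].and_mask;	/* clear the field(s) */
		tmp |= regs[i].or_mask;		/* program the golden value */
		WREG32(reg, tmp);
	}
}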
1161e60f8db5SAlex Xie 
1162e60f8db5SAlex Xie /**
1163e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1164e60f8db5SAlex Xie  *
1165e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1166e60f8db5SAlex Xie  */
1167e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1168e60f8db5SAlex Xie {
1169e60f8db5SAlex Xie 	int r;
1170e60f8db5SAlex Xie 	bool value;
1171e60f8db5SAlex Xie 	u32 tmp;
1172e60f8db5SAlex Xie 
11739c3f2b54SAlex Deucher 	amdgpu_device_program_register_sequence(adev,
1174e60f8db5SAlex Xie 						golden_settings_vega10_hdp,
1175c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_vega10_hdp));
1176e60f8db5SAlex Xie 
11771123b989SChristian König 	if (adev->gart.bo == NULL) {
1178e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1179e60f8db5SAlex Xie 		return -EINVAL;
1180e60f8db5SAlex Xie 	}
1181ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1182ce1b1b66SMonk Liu 	if (r)
1183ce1b1b66SMonk Liu 		return r;
1184e60f8db5SAlex Xie 
11852fcd43ceSHawking Zhang 	switch (adev->asic_type) {
11862fcd43ceSHawking Zhang 	case CHIP_RAVEN:
1187f8386b35SHawking Zhang 		mmhub_v1_0_update_power_gating(adev, true);
11882fcd43ceSHawking Zhang 		break;
11892fcd43ceSHawking Zhang 	default:
11902fcd43ceSHawking Zhang 		break;
11912fcd43ceSHawking Zhang 	}
11922fcd43ceSHawking Zhang 
1193e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
1194e60f8db5SAlex Xie 	if (r)
1195e60f8db5SAlex Xie 		return r;
1196e60f8db5SAlex Xie 
1197e60f8db5SAlex Xie 	r = mmhub_v1_0_gart_enable(adev);
1198e60f8db5SAlex Xie 	if (r)
1199e60f8db5SAlex Xie 		return r;
1200e60f8db5SAlex Xie 
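	/* WREG32_FIELD15 is a read-modify-write helper: roughly, it reads
	 * the register via RREG32_SOC15, clears the named field's mask,
	 * ORs in the new value shifted into place, and writes it back.
	 */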
1201846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1202e60f8db5SAlex Xie 
1203b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1204b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1205e60f8db5SAlex Xie 
12061d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP. */
120769882565SChristian König 	adev->nbio_funcs->hdp_flush(adev, NULL);
12081d4e0a8cSMonk Liu 
1209e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1210e60f8db5SAlex Xie 		value = false;
1211e60f8db5SAlex Xie 	else
1212e60f8db5SAlex Xie 		value = true;
1213e60f8db5SAlex Xie 
1214e60f8db5SAlex Xie 	gfxhub_v1_0_set_fault_enable_default(adev, value);
1215e60f8db5SAlex Xie 	mmhub_v1_0_set_fault_enable_default(adev, value);
12162a79d868SYong Zhao 	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
1217e60f8db5SAlex Xie 
1218e60f8db5SAlex Xie 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1219770d13b1SChristian König 		 (unsigned)(adev->gmc.gart_size >> 20),
12204e830fb1SChristian König 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1221e60f8db5SAlex Xie 	adev->gart.ready = true;
1222e60f8db5SAlex Xie 	return 0;
1223e60f8db5SAlex Xie }
1224e60f8db5SAlex Xie 
1225e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1226e60f8db5SAlex Xie {
1227e60f8db5SAlex Xie 	int r;
1228e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1229e60f8db5SAlex Xie 
1230e60f8db5SAlex Xie 	/* The sequence matters: golden registers must be programmed before the GART is enabled. */
1231e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1232e60f8db5SAlex Xie 
1233edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1234edca2d05SAlex Deucher 		/* Lock out access through the VGA aperture */
12354d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1236edca2d05SAlex Deucher 
1237edca2d05SAlex Deucher 		/* disable VGA render */
12384d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1239edca2d05SAlex Deucher 	}
1240edca2d05SAlex Deucher 
1241e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1242e60f8db5SAlex Xie 
1243e60f8db5SAlex Xie 	return r;
1244e60f8db5SAlex Xie }
1245e60f8db5SAlex Xie 
1246e60f8db5SAlex Xie /**
1247e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1248e60f8db5SAlex Xie  *
1249e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1250e60f8db5SAlex Xie  *
1251e60f8db5SAlex Xie  * This disables all VM page tables.
1252e60f8db5SAlex Xie  */
1253e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1254e60f8db5SAlex Xie {
1255e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
1256e60f8db5SAlex Xie 	mmhub_v1_0_gart_disable(adev);
1257ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1258e60f8db5SAlex Xie }
1259e60f8db5SAlex Xie 
1260e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1261e60f8db5SAlex Xie {
1262e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1263e60f8db5SAlex Xie 
12645dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
12655dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
12665dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
12675dd696aeSTrigger Huang 		return 0;
12685dd696aeSTrigger Huang 	}
12695dd696aeSTrigger Huang 
1270791c4769Sxinhui pan 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1271770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1272e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1273e60f8db5SAlex Xie 
1274e60f8db5SAlex Xie 	return 0;
1275e60f8db5SAlex Xie }
1276e60f8db5SAlex Xie 
1277e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1278e60f8db5SAlex Xie {
1279e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1280e60f8db5SAlex Xie 
1281f053cd47STom St Denis 	return gmc_v9_0_hw_fini(adev);
1282e60f8db5SAlex Xie }
1283e60f8db5SAlex Xie 
1284e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1285e60f8db5SAlex Xie {
1286e60f8db5SAlex Xie 	int r;
1287e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1288e60f8db5SAlex Xie 
1289e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1290e60f8db5SAlex Xie 	if (r)
1291e60f8db5SAlex Xie 		return r;
1292e60f8db5SAlex Xie 
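	/* drop all cached VMID assignments so that nothing stale from
	 * before the suspend is reused after the tables are rebuilt */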
1293620f774fSChristian König 	amdgpu_vmid_reset_all(adev);
1294e60f8db5SAlex Xie 
129532601d48SChristian König 	return 0;
1296e60f8db5SAlex Xie }
1297e60f8db5SAlex Xie 
1298e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1299e60f8db5SAlex Xie {
1300e60f8db5SAlex Xie 	/* MC is always ready in GMC v9. */
1301e60f8db5SAlex Xie 	return true;
1302e60f8db5SAlex Xie }
1303e60f8db5SAlex Xie 
1304e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1305e60f8db5SAlex Xie {
1306e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9. */
1307e60f8db5SAlex Xie 	return 0;
1308e60f8db5SAlex Xie }
1309e60f8db5SAlex Xie 
1310e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1311e60f8db5SAlex Xie {
1312e60f8db5SAlex Xie 	/* XXX for emulation. */
1313e60f8db5SAlex Xie 	return 0;
1314e60f8db5SAlex Xie }
1315e60f8db5SAlex Xie 
1316e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1317e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1318e60f8db5SAlex Xie {
1319d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1320d5583d4fSHuang Rui 
1321d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1322e60f8db5SAlex Xie }
1323e60f8db5SAlex Xie 
132413052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
132513052be5SHuang Rui {
132613052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
132713052be5SHuang Rui 
132813052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
132913052be5SHuang Rui }
133013052be5SHuang Rui 
1331e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1332e60f8db5SAlex Xie 					enum amd_powergating_state state)
1333e60f8db5SAlex Xie {
1334e60f8db5SAlex Xie 	return 0;
1335e60f8db5SAlex Xie }
1336e60f8db5SAlex Xie 
1337e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1338e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1339e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1340e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1341e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1342e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1343e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1344e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1345e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1346e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1347e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1348e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1349e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1350e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1351e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
135213052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1353e60f8db5SAlex Xie };
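
/*
 * Illustrative only: a simplified view of how the amdgpu IP-block
 * framework walks tables like the one above.  Real bring-up is phased
 * (early_init for every block, then sw_init for every block, then
 * hw_init) with per-block status tracking; that is all elided here,
 * and example_ip_bringup is a hypothetical name.
 */
static int example_ip_bringup(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		const struct amd_ip_funcs *funcs =
			adev->ip_blocks[i].version->funcs;

		r = funcs->sw_init(adev);	/* e.g. gmc_v9_0_sw_init */
		if (r)
			return r;
		r = funcs->hw_init(adev);	/* e.g. gmc_v9_0_hw_init */
		if (r)
			return r;
	}
	return 0;
}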
1354e60f8db5SAlex Xie 
1355e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1356e60f8db5SAlex Xie {
1357e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1358e60f8db5SAlex Xie 	.major = 9,
1359e60f8db5SAlex Xie 	.minor = 0,
1360e60f8db5SAlex Xie 	.rev = 0,
1361e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1362e60f8db5SAlex Xie };
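
/*
 * How this block is consumed: the SoC setup code (soc15.c for
 * Vega/Raven) adds it to the device's IP list during early setup,
 * e.g.:
 *
 *	amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 */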
1363