xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c (revision 174cd4b1)
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Xiangliang.Yu@amd.com
23  */
24 
25 #include "amdgpu.h"
26 #include "vi.h"
27 #include "bif/bif_5_0_d.h"
28 #include "bif/bif_5_0_sh_mask.h"
29 #include "vid.h"
30 #include "gca/gfx_8_0_d.h"
31 #include "gca/gfx_8_0_sh_mask.h"
32 #include "gmc_v8_0.h"
33 #include "gfx_v8_0.h"
34 #include "sdma_v3_0.h"
35 #include "tonga_ih.h"
36 #include "gmc/gmc_8_2_d.h"
37 #include "gmc/gmc_8_2_sh_mask.h"
38 #include "oss/oss_3_0_d.h"
39 #include "oss/oss_3_0_sh_mask.h"
40 #include "gca/gfx_8_0_sh_mask.h"
41 #include "dce/dce_10_0_d.h"
42 #include "dce/dce_10_0_sh_mask.h"
43 #include "smu/smu_7_1_3_d.h"
44 #include "mxgpu_vi.h"
45 
46 /* VI golden setting */
/*
 * Fiji VF MGCG/CGCG golden init sequence.  Entries are triplets of
 * (register offset, AND mask, OR value), consumed three at a time by
 * amdgpu_program_register_sequence() in xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	/* reselect broadcast before programming the global CG controls */
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
96 
/*
 * Fiji VF golden register settings (rev a10).
 * (register offset, AND mask, OR value) triplets; see
 * xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
124 
/*
 * Fiji VF common golden settings (all revisions).
 * (register offset, AND mask, OR value) triplets; see
 * xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
137 
/*
 * Tonga VF MGCG/CGCG golden init sequence.  Entries are triplets of
 * (register offset, AND mask, OR value), consumed three at a time by
 * amdgpu_program_register_sequence() in xgpu_vi_init_golden_registers().
 * Unlike Fiji, Tonga also programs the per-CU CGTS control registers.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE,   0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL,        0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0,       0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4,        0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL,        0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0,       0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL,             0xffffffff, 0x00000100,
	/* reselect broadcast, then program per-CU clock gating controls */
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG,         0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL,     0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL,       0xffffffff, 0x0020003c,
	mmPCIE_INDEX,               0xffffffff, 0x0140001c,
	mmPCIE_DATA,                0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,          0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4,           0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL,   0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL,      0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS,          0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0,       0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL,    0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL,          0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL,           0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL,           0xff000ff0, 0x00000100,
};
227 
/*
 * Tonga VF golden register settings (rev a11).
 * (register offset, AND mask, OR value) triplets; see
 * xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
267 
/*
 * Tonga VF common golden settings (all revisions).
 * (register offset, AND mask, OR value) triplets; see
 * xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX,               0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG,          0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1,        0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG,               0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
277 
278 void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
279 {
280 	switch (adev->asic_type) {
281 	case CHIP_FIJI:
282 		amdgpu_program_register_sequence(adev,
283 						 xgpu_fiji_mgcg_cgcg_init,
284 						 (const u32)ARRAY_SIZE(
285 						 xgpu_fiji_mgcg_cgcg_init));
286 		amdgpu_program_register_sequence(adev,
287 						 xgpu_fiji_golden_settings_a10,
288 						 (const u32)ARRAY_SIZE(
289 						 xgpu_fiji_golden_settings_a10));
290 		amdgpu_program_register_sequence(adev,
291 						 xgpu_fiji_golden_common_all,
292 						 (const u32)ARRAY_SIZE(
293 						 xgpu_fiji_golden_common_all));
294 		break;
295 	case CHIP_TONGA:
296 		amdgpu_program_register_sequence(adev,
297 						 xgpu_tonga_mgcg_cgcg_init,
298 						 (const u32)ARRAY_SIZE(
299 						 xgpu_tonga_mgcg_cgcg_init));
300 		amdgpu_program_register_sequence(adev,
301 						 xgpu_tonga_golden_settings_a11,
302 						 (const u32)ARRAY_SIZE(
303 						 xgpu_tonga_golden_settings_a11));
304 		amdgpu_program_register_sequence(adev,
305 						 xgpu_tonga_golden_common_all,
306 						 (const u32)ARRAY_SIZE(
307 						 xgpu_tonga_golden_common_all));
308 		break;
309 	default:
310 		BUG_ON("Doesn't support chip type.\n");
311 		break;
312 	}
313 }
314 
315 /*
316  * Mailbox communication between GPU hypervisor and VFs
317  */
318 static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
319 {
320 	u32 reg;
321 
322 	reg = RREG32(mmMAILBOX_CONTROL);
323 	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
324 	WREG32(mmMAILBOX_CONTROL, reg);
325 }
326 
327 static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
328 {
329 	u32 reg;
330 
331 	reg = RREG32(mmMAILBOX_CONTROL);
332 	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
333 			    TRN_MSG_VALID, val ? 1 : 0);
334 	WREG32(mmMAILBOX_CONTROL, reg);
335 }
336 
337 static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
338 				      enum idh_event event)
339 {
340 	u32 reg;
341 
342 	reg = RREG32(mmMAILBOX_MSGBUF_TRN_DW0);
343 	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
344 			    MSGBUF_DATA, event);
345 	WREG32(mmMAILBOX_MSGBUF_TRN_DW0, reg);
346 
347 	xgpu_vi_mailbox_set_valid(adev, true);
348 }
349 
350 static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
351 				   enum idh_event event)
352 {
353 	u32 reg;
354 
355 	reg = RREG32(mmMAILBOX_MSGBUF_RCV_DW0);
356 	if (reg != event)
357 		return -ENOENT;
358 
359 	/* send ack to PF */
360 	xgpu_vi_mailbox_send_ack(adev);
361 
362 	return 0;
363 }
364 
365 static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
366 {
367 	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
368 	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
369 	u32 reg;
370 
371 	reg = RREG32(mmMAILBOX_CONTROL);
372 	while (!(reg & mask)) {
373 		if (timeout <= 0) {
374 			pr_err("Doesn't get ack from pf.\n");
375 			r = -ETIME;
376 			break;
377 		}
378 		msleep(1);
379 		timeout -= 1;
380 
381 		reg = RREG32(mmMAILBOX_CONTROL);
382 	}
383 
384 	return r;
385 }
386 
387 static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
388 {
389 	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
390 
391 	r = xgpu_vi_mailbox_rcv_msg(adev, event);
392 	while (r) {
393 		if (timeout <= 0) {
394 			pr_err("Doesn't get ack from pf.\n");
395 			r = -ETIME;
396 			break;
397 		}
398 		msleep(1);
399 		timeout -= 1;
400 
401 		r = xgpu_vi_mailbox_rcv_msg(adev, event);
402 	}
403 
404 	return r;
405 }
406 
407 static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
408 					enum idh_request request)
409 {
410 	int r;
411 
412 	xgpu_vi_mailbox_trans_msg(adev, request);
413 
414 	/* start to poll ack */
415 	r = xgpu_vi_poll_ack(adev);
416 	if (r)
417 		return r;
418 
419 	xgpu_vi_mailbox_set_valid(adev, false);
420 
421 	/* start to check msg if request is idh_req_gpu_init_access */
422 	if (request == IDH_REQ_GPU_INIT_ACCESS) {
423 		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
424 		if (r)
425 			return r;
426 	}
427 
428 	return 0;
429 }
430 
/* Ask the hypervisor to reset this VF (amdgpu_virt_ops.reset_gpu hook). */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
435 
436 static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
437 					   bool init)
438 {
439 	enum idh_event event;
440 
441 	event = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
442 	return xgpu_vi_send_access_requests(adev, event);
443 }
444 
445 static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
446 					   bool init)
447 {
448 	enum idh_event event;
449 	int r = 0;
450 
451 	event = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
452 	r = xgpu_vi_send_access_requests(adev, event);
453 
454 	return r;
455 }
456 
457 /* add support mailbox interrupts */
/*
 * Handler for the mailbox ACK interrupt.  The driver waits for acks
 * synchronously via xgpu_vi_poll_ack(), so nothing is done here.
 */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
465 
466 static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
467 				       struct amdgpu_irq_src *src,
468 				       unsigned type,
469 				       enum amdgpu_interrupt_state state)
470 {
471 	u32 tmp = RREG32(mmMAILBOX_INT_CNTL);
472 
473 	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
474 			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
475 	WREG32(mmMAILBOX_INT_CNTL, tmp);
476 
477 	return 0;
478 }
479 
480 static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
481 {
482 	struct amdgpu_virt *virt = container_of(work,
483 					struct amdgpu_virt, flr_work.work);
484 	struct amdgpu_device *adev = container_of(virt,
485 					struct amdgpu_device, virt);
486 	int r = 0;
487 
488 	r = xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
489 	if (r)
490 		DRM_ERROR("failed to get flr cmpl msg from hypervior.\n");
491 
492 	/* TODO: need to restore gfx states */
493 }
494 
495 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
496 				       struct amdgpu_irq_src *src,
497 				       unsigned type,
498 				       enum amdgpu_interrupt_state state)
499 {
500 	u32 tmp = RREG32(mmMAILBOX_INT_CNTL);
501 
502 	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
503 			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
504 	WREG32(mmMAILBOX_INT_CNTL, tmp);
505 
506 	return 0;
507 }
508 
509 static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
510 				   struct amdgpu_irq_src *source,
511 				   struct amdgpu_iv_entry *entry)
512 {
513 	int r;
514 
515 	adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
516 	r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
517 	/* do nothing for other msg */
518 	if (r)
519 		return 0;
520 
521 	/* TODO: need to save gfx states */
522 	schedule_delayed_work(&adev->virt.flr_work,
523 			      msecs_to_jiffies(VI_MAILBOX_RESET_TIME));
524 
525 	return 0;
526 }
527 
/* IRQ source callbacks for the mailbox ACK interrupt (enable + handler). */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
532 
/* IRQ source callbacks for the mailbox message-valid interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
537 
/* Wire the mailbox ack/rcv interrupt sources to their callback tables. */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
545 
546 int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
547 {
548 	int r;
549 
550 	r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
551 	if (r)
552 		return r;
553 
554 	r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
555 	if (r) {
556 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
557 		return r;
558 	}
559 
560 	return 0;
561 }
562 
563 int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
564 {
565 	int r;
566 
567 	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
568 	if (r)
569 		return r;
570 	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
571 	if (r) {
572 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
573 		return r;
574 	}
575 
576 	INIT_DELAYED_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
577 
578 	return 0;
579 }
580 
/*
 * Tear down mailbox interrupt state.  The FLR work is flushed first so
 * it cannot run after the interrupt references are dropped.
 */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	cancel_delayed_work_sync(&adev->virt.flr_work);
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
587 
/* VF-to-hypervisor operations exported to the amdgpu virtualization layer. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
	.reset_gpu		= xgpu_vi_request_reset,
};
593