xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (revision f2d8e15b)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/firmware.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_gfx.h"
32 #include "soc15.h"
33 #include "soc15d.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_pm.h"
36 
37 #include "gc/gc_9_0_offset.h"
38 #include "gc/gc_9_0_sh_mask.h"
39 
40 #include "vega10_enum.h"
41 
42 #include "soc15_common.h"
43 #include "clearstate_gfx9.h"
44 #include "v9_structs.h"
45 
46 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
47 
48 #include "amdgpu_ras.h"
49 
50 #include "gfx_v9_4.h"
51 #include "gfx_v9_0.h"
52 #include "gfx_v9_4_2.h"
53 
54 #include "asic_reg/pwr/pwr_10_0_offset.h"
55 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
56 #include "asic_reg/gc/gc_9_0_default.h"
57 
58 #define GFX9_NUM_GFX_RINGS     1
59 #define GFX9_MEC_HPD_SIZE 4096
60 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
61 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
62 
63 #define mmGCEA_PROBE_MAP                        0x070c
64 #define mmGCEA_PROBE_MAP_BASE_IDX               0
65 
66 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
67 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
68 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
69 MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
70 MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
71 MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
72 
73 MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
75 MODULE_FIRMWARE("amdgpu/vega12_me.bin");
76 MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
78 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
79 
80 MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
81 MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
82 MODULE_FIRMWARE("amdgpu/vega20_me.bin");
83 MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
84 MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
85 MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
86 
87 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
88 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
89 MODULE_FIRMWARE("amdgpu/raven_me.bin");
90 MODULE_FIRMWARE("amdgpu/raven_mec.bin");
91 MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
92 MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
93 
94 MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
95 MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_me.bin");
97 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
98 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
99 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
100 MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
101 
102 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
103 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
104 MODULE_FIRMWARE("amdgpu/raven2_me.bin");
105 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
106 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
107 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
108 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
109 
110 MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
111 MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
112 
113 MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
114 MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
115 MODULE_FIRMWARE("amdgpu/renoir_me.bin");
116 MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
117 MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
118 
119 MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
120 MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
121 MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
122 MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
123 MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
124 MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
125 
126 MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
127 MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
128 MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
129 MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
130 MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
131 
132 #define mmTCP_CHAN_STEER_0_ARCT								0x0b03
133 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX							0
134 #define mmTCP_CHAN_STEER_1_ARCT								0x0b04
135 #define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX							0
136 #define mmTCP_CHAN_STEER_2_ARCT								0x0b09
137 #define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX							0
138 #define mmTCP_CHAN_STEER_3_ARCT								0x0b0a
139 #define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX							0
140 #define mmTCP_CHAN_STEER_4_ARCT								0x0b0b
141 #define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX							0
142 #define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
143 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX							0
144 
145 #define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
146 #define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
147 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
148 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1
149 
150 enum ta_ras_gfx_subblock {
151 	/*CPC*/
152 	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
153 	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
154 	TA_RAS_BLOCK__GFX_CPC_UCODE,
155 	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
156 	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
157 	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
158 	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
159 	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
160 	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
161 	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
162 	/* CPF*/
163 	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
164 	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
165 	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
166 	TA_RAS_BLOCK__GFX_CPF_TAG,
167 	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
168 	/* CPG*/
169 	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
170 	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
171 	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
172 	TA_RAS_BLOCK__GFX_CPG_TAG,
173 	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
174 	/* GDS*/
175 	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
176 	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
177 	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
178 	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
179 	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
180 	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
181 	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
182 	/* SPI*/
183 	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
184 	/* SQ*/
185 	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
186 	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
187 	TA_RAS_BLOCK__GFX_SQ_LDS_D,
188 	TA_RAS_BLOCK__GFX_SQ_LDS_I,
189 	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
190 	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
191 	/* SQC (3 ranges)*/
192 	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
193 	/* SQC range 0*/
194 	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
195 	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
196 		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
197 	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
198 	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
199 	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
200 	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
201 	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
202 	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
203 	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
204 		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
205 	/* SQC range 1*/
206 	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
207 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
208 		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
209 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
210 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
211 	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
212 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
213 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
214 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
215 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
216 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
217 	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
218 		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
219 	/* SQC range 2*/
220 	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
221 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
222 		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
223 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
224 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
225 	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
226 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
227 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
228 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
229 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
230 	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
231 	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
232 		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
233 	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
234 	/* TA*/
235 	TA_RAS_BLOCK__GFX_TA_INDEX_START,
236 	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
237 	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
238 	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
239 	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
240 	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
241 	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
242 	/* TCA*/
243 	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
244 	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
245 	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
246 	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
247 	/* TCC (5 sub-ranges)*/
248 	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
249 	/* TCC range 0*/
250 	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
251 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
252 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
253 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
254 	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
255 	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
256 	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
257 	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
258 	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
259 	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
260 	/* TCC range 1*/
261 	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
262 	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
263 	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
264 	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
265 		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
266 	/* TCC range 2*/
267 	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
268 	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
269 	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
270 	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
271 	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
272 	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
273 	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
274 	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
275 	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
276 	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
277 		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
278 	/* TCC range 3*/
279 	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
280 	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
281 	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
282 	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
283 		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
284 	/* TCC range 4*/
285 	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
286 	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
287 		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
288 	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
289 	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
290 		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
291 	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
292 	/* TCI*/
293 	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
294 	/* TCP*/
295 	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
296 	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
297 	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
298 	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
299 	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
300 	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
301 	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
302 	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
303 	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
304 	/* TD*/
305 	TA_RAS_BLOCK__GFX_TD_INDEX_START,
306 	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
307 	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
308 	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
309 	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
310 	/* EA (3 sub-ranges)*/
311 	TA_RAS_BLOCK__GFX_EA_INDEX_START,
312 	/* EA range 0*/
313 	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
314 	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
315 	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
316 	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
317 	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
318 	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
319 	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
320 	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
321 	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
322 	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
323 	/* EA range 1*/
324 	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
325 	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
326 	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
327 	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
328 	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
329 	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
330 	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
331 	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
332 	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
333 	/* EA range 2*/
334 	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
335 	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
336 	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
337 	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
338 	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
339 	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
340 	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
341 	/* UTC VM L2 bank*/
342 	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
343 	/* UTC VM walker*/
344 	TA_RAS_BLOCK__UTC_VML2_WALKER,
345 	/* UTC ATC L2 2MB cache*/
346 	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
347 	/* UTC ATC L2 4KB cache*/
348 	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
349 	TA_RAS_BLOCK__GFX_MAX
350 };
351 
352 struct ras_gfx_subblock {
353 	unsigned char *name;
354 	int ta_subblock;
355 	int hw_supported_error_type;
356 	int sw_supported_error_type;
357 };
358 
359 #define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
360 	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
361 		#subblock,                                                     \
362 		TA_RAS_BLOCK__##subblock,                                      \
363 		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
364 		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
365 	}
366 
367 static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
368 	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
369 	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
370 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
371 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
372 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
373 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
374 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
375 	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
376 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
377 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
378 	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
379 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
380 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
381 	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
382 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
383 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
384 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
385 			     0),
386 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
387 			     0),
388 	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
389 	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
390 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
391 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
392 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
393 	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
394 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
395 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
396 			     0, 0),
397 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
398 			     0),
399 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
400 			     0, 0),
401 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
402 			     0),
403 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
404 			     0, 0),
405 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
406 			     0),
407 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
408 			     1),
409 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
410 			     0, 0, 0),
411 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
412 			     0),
413 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
414 			     0),
415 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
416 			     0),
417 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
418 			     0),
419 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
420 			     0),
421 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
422 			     0, 0),
423 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
424 			     0),
425 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
426 			     0),
427 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
428 			     0, 0, 0),
429 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
430 			     0),
431 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
432 			     0),
433 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
434 			     0),
435 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
436 			     0),
437 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
438 			     0),
439 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
440 			     0, 0),
441 	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
442 			     0),
443 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
444 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
445 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
446 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
447 	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
448 	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
449 	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
450 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
451 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
452 			     1),
453 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
454 			     1),
455 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
456 			     1),
457 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
458 			     0),
459 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
460 			     0),
461 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
462 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
463 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
464 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
465 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
466 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
467 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
468 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
469 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
470 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
471 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
472 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
473 			     0),
474 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
475 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
476 			     0),
477 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
478 			     0, 0),
479 	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
480 			     0),
481 	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
482 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
483 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
484 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
485 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
486 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
487 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
488 	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
489 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
490 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
491 	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
492 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
493 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
494 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
495 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
496 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
497 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
498 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
499 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
500 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
501 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
502 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
503 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
504 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
505 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
506 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
507 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
508 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
509 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
510 	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
511 	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
512 	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
513 	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
514 	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
515 };
516 
517 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
518 {
519 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
520 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
521 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
522 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
523 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
524 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
525 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
526 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
527 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
528 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
529 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
530 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
531 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
532 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
533 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
534 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
535 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
536 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
537 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
538 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
539 };
540 
541 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
542 {
543 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
544 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
545 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
546 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
547 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
548 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
549 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
550 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
551 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
552 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
553 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
554 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
555 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
556 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
557 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
558 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
559 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
560 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
561 };
562 
563 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
564 {
565 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
566 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
567 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
568 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
569 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
570 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
571 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
572 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
573 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
574 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
575 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
576 };
577 
578 static const struct soc15_reg_golden golden_settings_gc_9_1[] =
579 {
580 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
581 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
582 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
583 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
584 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
585 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
586 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
587 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
588 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
589 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
590 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
591 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
592 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
593 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
594 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
595 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
596 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
597 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
598 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
599 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
600 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
601 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
602 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
603 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
604 };
605 
606 static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
607 {
608 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
609 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
610 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
611 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
612 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
613 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
614 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
615 };
616 
617 static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
618 {
619 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
620 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
621 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
622 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
623 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
624 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
625 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
626 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
627 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
628 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
629 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
630 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
631 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
632 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
633 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
634 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
635 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
636 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
637 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
638 };
639 
640 static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
641 {
642 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
643 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
644 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
645 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
646 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
647 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
648 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
649 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
650 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
651 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
652 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
653 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
654 };
655 
656 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
657 {
658 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
659 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
660 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
661 };
662 
663 static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
664 {
665 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
666 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
667 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
668 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
669 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
670 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
671 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
672 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
673 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
674 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
675 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
676 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
677 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
678 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
679 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
680 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
681 };
682 
683 static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
684 {
685 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
686 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
687 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
688 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
689 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
690 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
691 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
692 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
693 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
694 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
695 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
696 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
697 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
698 };
699 
700 static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
701 {
702 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
703 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
704 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
705 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
706 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
707 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
708 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
709 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
710 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
711 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
712 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
713 };
714 
715 static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
716 	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
717 	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
718 };
719 
720 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
721 {
722 	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
723 	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
724 	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
725 	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
726 	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
727 	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
728 	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
729 	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
730 };
731 
732 static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
733 {
734 	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
735 	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
736 	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
737 	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
738 	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
739 	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
740 	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
741 	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
742 };
743 
744 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
745 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
746 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
747 #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
748 
749 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
750 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
751 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
752 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
753 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
754 				struct amdgpu_cu_info *cu_info);
755 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
756 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
757 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
758 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
759 					  void *ras_error_status);
760 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
761 				     void *inject_if);
762 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
763 
764 static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
765 				uint64_t queue_mask)
766 {
767 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
768 	amdgpu_ring_write(kiq_ring,
769 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
770 		/* vmid_mask:0* queue_type:0 (KIQ) */
771 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
772 	amdgpu_ring_write(kiq_ring,
773 			lower_32_bits(queue_mask));	/* queue mask lo */
774 	amdgpu_ring_write(kiq_ring,
775 			upper_32_bits(queue_mask));	/* queue mask hi */
776 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
777 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
778 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
779 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
780 }
781 
782 static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
783 				 struct amdgpu_ring *ring)
784 {
785 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
786 	uint64_t wptr_addr = ring->wptr_gpu_addr;
787 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
788 
789 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
790 	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
791 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
792 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
793 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
794 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
795 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
796 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
797 			 /*queue_type: normal compute queue */
798 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
799 			 /* alloc format: all_on_one_pipe */
800 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
801 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
802 			 /* num_queues: must be 1 */
803 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
804 	amdgpu_ring_write(kiq_ring,
805 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
806 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
807 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
808 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
809 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
810 }
811 
812 static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
813 				   struct amdgpu_ring *ring,
814 				   enum amdgpu_unmap_queues_action action,
815 				   u64 gpu_addr, u64 seq)
816 {
817 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
818 
819 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
820 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
821 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
822 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
823 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
824 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
825 	amdgpu_ring_write(kiq_ring,
826 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
827 
828 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
829 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
830 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
831 		amdgpu_ring_write(kiq_ring, seq);
832 	} else {
833 		amdgpu_ring_write(kiq_ring, 0);
834 		amdgpu_ring_write(kiq_ring, 0);
835 		amdgpu_ring_write(kiq_ring, 0);
836 	}
837 }
838 
839 static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
840 				   struct amdgpu_ring *ring,
841 				   u64 addr,
842 				   u64 seq)
843 {
844 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
845 
846 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
847 	amdgpu_ring_write(kiq_ring,
848 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
849 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
850 			  PACKET3_QUERY_STATUS_COMMAND(2));
851 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
852 	amdgpu_ring_write(kiq_ring,
853 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
854 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
855 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
856 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
857 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
858 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
859 }
860 
861 static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
862 				uint16_t pasid, uint32_t flush_type,
863 				bool all_hub)
864 {
865 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
866 	amdgpu_ring_write(kiq_ring,
867 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
868 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
869 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
870 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
871 }
872 
873 static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
874 	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
875 	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
876 	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
877 	.kiq_query_status = gfx_v9_0_kiq_query_status,
878 	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
879 	.set_resources_size = 8,
880 	.map_queues_size = 7,
881 	.unmap_queues_size = 6,
882 	.query_status_size = 7,
883 	.invalidate_tlbs_size = 2,
884 };
885 
886 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
887 {
888 	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
889 }
890 
891 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
892 {
893 	switch (adev->ip_versions[GC_HWIP][0]) {
894 	case IP_VERSION(9, 0, 1):
895 		soc15_program_register_sequence(adev,
896 						golden_settings_gc_9_0,
897 						ARRAY_SIZE(golden_settings_gc_9_0));
898 		soc15_program_register_sequence(adev,
899 						golden_settings_gc_9_0_vg10,
900 						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
901 		break;
902 	case IP_VERSION(9, 2, 1):
903 		soc15_program_register_sequence(adev,
904 						golden_settings_gc_9_2_1,
905 						ARRAY_SIZE(golden_settings_gc_9_2_1));
906 		soc15_program_register_sequence(adev,
907 						golden_settings_gc_9_2_1_vg12,
908 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
909 		break;
910 	case IP_VERSION(9, 4, 0):
911 		soc15_program_register_sequence(adev,
912 						golden_settings_gc_9_0,
913 						ARRAY_SIZE(golden_settings_gc_9_0));
914 		soc15_program_register_sequence(adev,
915 						golden_settings_gc_9_0_vg20,
916 						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
917 		break;
918 	case IP_VERSION(9, 4, 1):
919 		soc15_program_register_sequence(adev,
920 						golden_settings_gc_9_4_1_arct,
921 						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
922 		break;
923 	case IP_VERSION(9, 2, 2):
924 	case IP_VERSION(9, 1, 0):
925 		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
926 						ARRAY_SIZE(golden_settings_gc_9_1));
927 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
928 			soc15_program_register_sequence(adev,
929 							golden_settings_gc_9_1_rv2,
930 							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
931 		else
932 			soc15_program_register_sequence(adev,
933 							golden_settings_gc_9_1_rv1,
934 							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
935 		break;
936 	 case IP_VERSION(9, 3, 0):
937 		soc15_program_register_sequence(adev,
938 						golden_settings_gc_9_1_rn,
939 						ARRAY_SIZE(golden_settings_gc_9_1_rn));
940 		return; /* for renoir, don't need common goldensetting */
941 	case IP_VERSION(9, 4, 2):
942 		gfx_v9_4_2_init_golden_registers(adev,
943 						 adev->smuio.funcs->get_die_id(adev));
944 		break;
945 	default:
946 		break;
947 	}
948 
949 	if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
950 	    (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))
951 		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
952 						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
953 }
954 
955 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
956 				       bool wc, uint32_t reg, uint32_t val)
957 {
958 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
959 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
960 				WRITE_DATA_DST_SEL(0) |
961 				(wc ? WR_CONFIRM : 0));
962 	amdgpu_ring_write(ring, reg);
963 	amdgpu_ring_write(ring, 0);
964 	amdgpu_ring_write(ring, val);
965 }
966 
967 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
968 				  int mem_space, int opt, uint32_t addr0,
969 				  uint32_t addr1, uint32_t ref, uint32_t mask,
970 				  uint32_t inv)
971 {
972 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
973 	amdgpu_ring_write(ring,
974 				 /* memory (1) or register (0) */
975 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
976 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
977 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
978 				 WAIT_REG_MEM_ENGINE(eng_sel)));
979 
980 	if (mem_space)
981 		BUG_ON(addr0 & 0x3); /* Dword align */
982 	amdgpu_ring_write(ring, addr0);
983 	amdgpu_ring_write(ring, addr1);
984 	amdgpu_ring_write(ring, ref);
985 	amdgpu_ring_write(ring, mask);
986 	amdgpu_ring_write(ring, inv); /* poll interval */
987 }
988 
989 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
990 {
991 	struct amdgpu_device *adev = ring->adev;
992 	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
993 	uint32_t tmp = 0;
994 	unsigned i;
995 	int r;
996 
997 	WREG32(scratch, 0xCAFEDEAD);
998 	r = amdgpu_ring_alloc(ring, 3);
999 	if (r)
1000 		return r;
1001 
1002 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1003 	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
1004 	amdgpu_ring_write(ring, 0xDEADBEEF);
1005 	amdgpu_ring_commit(ring);
1006 
1007 	for (i = 0; i < adev->usec_timeout; i++) {
1008 		tmp = RREG32(scratch);
1009 		if (tmp == 0xDEADBEEF)
1010 			break;
1011 		udelay(1);
1012 	}
1013 
1014 	if (i >= adev->usec_timeout)
1015 		r = -ETIMEDOUT;
1016 	return r;
1017 }
1018 
1019 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1020 {
1021 	struct amdgpu_device *adev = ring->adev;
1022 	struct amdgpu_ib ib;
1023 	struct dma_fence *f = NULL;
1024 
1025 	unsigned index;
1026 	uint64_t gpu_addr;
1027 	uint32_t tmp;
1028 	long r;
1029 
1030 	r = amdgpu_device_wb_get(adev, &index);
1031 	if (r)
1032 		return r;
1033 
1034 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1035 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1036 	memset(&ib, 0, sizeof(ib));
1037 	r = amdgpu_ib_get(adev, NULL, 16,
1038 					AMDGPU_IB_POOL_DIRECT, &ib);
1039 	if (r)
1040 		goto err1;
1041 
1042 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1043 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1044 	ib.ptr[2] = lower_32_bits(gpu_addr);
1045 	ib.ptr[3] = upper_32_bits(gpu_addr);
1046 	ib.ptr[4] = 0xDEADBEEF;
1047 	ib.length_dw = 5;
1048 
1049 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1050 	if (r)
1051 		goto err2;
1052 
1053 	r = dma_fence_wait_timeout(f, false, timeout);
1054 	if (r == 0) {
1055 		r = -ETIMEDOUT;
1056 		goto err2;
1057 	} else if (r < 0) {
1058 		goto err2;
1059 	}
1060 
1061 	tmp = adev->wb.wb[index];
1062 	if (tmp == 0xDEADBEEF)
1063 		r = 0;
1064 	else
1065 		r = -EINVAL;
1066 
1067 err2:
1068 	amdgpu_ib_free(adev, &ib, NULL);
1069 	dma_fence_put(f);
1070 err1:
1071 	amdgpu_device_wb_free(adev, index);
1072 	return r;
1073 }
1074 
1075 
1076 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1077 {
1078 	release_firmware(adev->gfx.pfp_fw);
1079 	adev->gfx.pfp_fw = NULL;
1080 	release_firmware(adev->gfx.me_fw);
1081 	adev->gfx.me_fw = NULL;
1082 	release_firmware(adev->gfx.ce_fw);
1083 	adev->gfx.ce_fw = NULL;
1084 	release_firmware(adev->gfx.rlc_fw);
1085 	adev->gfx.rlc_fw = NULL;
1086 	release_firmware(adev->gfx.mec_fw);
1087 	adev->gfx.mec_fw = NULL;
1088 	release_firmware(adev->gfx.mec2_fw);
1089 	adev->gfx.mec2_fw = NULL;
1090 
1091 	kfree(adev->gfx.rlc.register_list_format);
1092 }
1093 
1094 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1095 {
1096 	const struct rlc_firmware_header_v2_1 *rlc_hdr;
1097 
1098 	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1099 	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1100 	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1101 	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1102 	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1103 	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1104 	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1105 	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1106 	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1107 	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1108 	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1109 	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1110 	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1111 	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1112 			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1113 }
1114 
1115 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1116 {
1117 	adev->gfx.me_fw_write_wait = false;
1118 	adev->gfx.mec_fw_write_wait = false;
1119 
1120 	if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
1121 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
1122 	    (adev->gfx.mec_feature_version < 46) ||
1123 	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
1124 	    (adev->gfx.pfp_feature_version < 46)))
1125 		DRM_WARN_ONCE("CP firmware version too old, please update!");
1126 
1127 	switch (adev->ip_versions[GC_HWIP][0]) {
1128 	case IP_VERSION(9, 0, 1):
1129 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1130 		    (adev->gfx.me_feature_version >= 42) &&
1131 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1132 		    (adev->gfx.pfp_feature_version >= 42))
1133 			adev->gfx.me_fw_write_wait = true;
1134 
1135 		if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1136 		    (adev->gfx.mec_feature_version >= 42))
1137 			adev->gfx.mec_fw_write_wait = true;
1138 		break;
1139 	case IP_VERSION(9, 2, 1):
1140 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1141 		    (adev->gfx.me_feature_version >= 44) &&
1142 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1143 		    (adev->gfx.pfp_feature_version >= 44))
1144 			adev->gfx.me_fw_write_wait = true;
1145 
1146 		if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1147 		    (adev->gfx.mec_feature_version >= 44))
1148 			adev->gfx.mec_fw_write_wait = true;
1149 		break;
1150 	case IP_VERSION(9, 4, 0):
1151 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1152 		    (adev->gfx.me_feature_version >= 44) &&
1153 		    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1154 		    (adev->gfx.pfp_feature_version >= 44))
1155 			adev->gfx.me_fw_write_wait = true;
1156 
1157 		if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1158 		    (adev->gfx.mec_feature_version >= 44))
1159 			adev->gfx.mec_fw_write_wait = true;
1160 		break;
1161 	case IP_VERSION(9, 1, 0):
1162 	case IP_VERSION(9, 2, 2):
1163 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1164 		    (adev->gfx.me_feature_version >= 42) &&
1165 		    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1166 		    (adev->gfx.pfp_feature_version >= 42))
1167 			adev->gfx.me_fw_write_wait = true;
1168 
1169 		if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1170 		    (adev->gfx.mec_feature_version >= 42))
1171 			adev->gfx.mec_fw_write_wait = true;
1172 		break;
1173 	default:
1174 		adev->gfx.me_fw_write_wait = true;
1175 		adev->gfx.mec_fw_write_wait = true;
1176 		break;
1177 	}
1178 }
1179 
1180 struct amdgpu_gfxoff_quirk {
1181 	u16 chip_vendor;
1182 	u16 chip_device;
1183 	u16 subsys_vendor;
1184 	u16 subsys_device;
1185 	u8 revision;
1186 };
1187 
1188 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1189 	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1190 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1191 	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1192 	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1193 	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1194 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1195 	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
1196 	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
1197 	{ 0, 0, 0, 0, 0 },
1198 };
1199 
1200 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1201 {
1202 	const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1203 
1204 	while (p && p->chip_device != 0) {
1205 		if (pdev->vendor == p->chip_vendor &&
1206 		    pdev->device == p->chip_device &&
1207 		    pdev->subsystem_vendor == p->subsys_vendor &&
1208 		    pdev->subsystem_device == p->subsys_device &&
1209 		    pdev->revision == p->revision) {
1210 			return true;
1211 		}
1212 		++p;
1213 	}
1214 	return false;
1215 }
1216 
1217 static bool is_raven_kicker(struct amdgpu_device *adev)
1218 {
1219 	if (adev->pm.fw_version >= 0x41e2b)
1220 		return true;
1221 	else
1222 		return false;
1223 }
1224 
1225 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1226 {
1227 	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&
1228 	    (adev->gfx.me_fw_version >= 0x000000a5) &&
1229 	    (adev->gfx.me_feature_version >= 52))
1230 		return true;
1231 	else
1232 		return false;
1233 }
1234 
1235 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1236 {
1237 	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1238 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1239 
1240 	switch (adev->ip_versions[GC_HWIP][0]) {
1241 	case IP_VERSION(9, 0, 1):
1242 	case IP_VERSION(9, 2, 1):
1243 	case IP_VERSION(9, 4, 0):
1244 		break;
1245 	case IP_VERSION(9, 2, 2):
1246 	case IP_VERSION(9, 1, 0):
1247 		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1248 		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1249 		    ((!is_raven_kicker(adev) &&
1250 		      adev->gfx.rlc_fw_version < 531) ||
1251 		     (adev->gfx.rlc_feature_version < 1) ||
1252 		     !adev->gfx.rlc.is_rlc_v2_1))
1253 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1254 
1255 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1256 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1257 				AMD_PG_SUPPORT_CP |
1258 				AMD_PG_SUPPORT_RLC_SMU_HS;
1259 		break;
1260 	case IP_VERSION(9, 3, 0):
1261 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1262 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1263 				AMD_PG_SUPPORT_CP |
1264 				AMD_PG_SUPPORT_RLC_SMU_HS;
1265 		break;
1266 	default:
1267 		break;
1268 	}
1269 }
1270 
1271 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1272 					  const char *chip_name)
1273 {
1274 	char fw_name[30];
1275 	int err;
1276 	struct amdgpu_firmware_info *info = NULL;
1277 	const struct common_firmware_header *header = NULL;
1278 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1279 
1280 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1281 	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1282 	if (err)
1283 		goto out;
1284 	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1285 	if (err)
1286 		goto out;
1287 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1288 	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1289 	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1290 
1291 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1292 	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1293 	if (err)
1294 		goto out;
1295 	err = amdgpu_ucode_validate(adev->gfx.me_fw);
1296 	if (err)
1297 		goto out;
1298 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1299 	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1300 	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1301 
1302 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1303 	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1304 	if (err)
1305 		goto out;
1306 	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1307 	if (err)
1308 		goto out;
1309 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1310 	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1311 	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1312 
1313 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1314 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1315 		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1316 		info->fw = adev->gfx.pfp_fw;
1317 		header = (const struct common_firmware_header *)info->fw->data;
1318 		adev->firmware.fw_size +=
1319 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1320 
1321 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1322 		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1323 		info->fw = adev->gfx.me_fw;
1324 		header = (const struct common_firmware_header *)info->fw->data;
1325 		adev->firmware.fw_size +=
1326 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1327 
1328 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1329 		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1330 		info->fw = adev->gfx.ce_fw;
1331 		header = (const struct common_firmware_header *)info->fw->data;
1332 		adev->firmware.fw_size +=
1333 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1334 	}
1335 
1336 out:
1337 	if (err) {
1338 		dev_err(adev->dev,
1339 			"gfx9: Failed to load firmware \"%s\"\n",
1340 			fw_name);
1341 		release_firmware(adev->gfx.pfp_fw);
1342 		adev->gfx.pfp_fw = NULL;
1343 		release_firmware(adev->gfx.me_fw);
1344 		adev->gfx.me_fw = NULL;
1345 		release_firmware(adev->gfx.ce_fw);
1346 		adev->gfx.ce_fw = NULL;
1347 	}
1348 	return err;
1349 }
1350 
1351 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1352 					  const char *chip_name)
1353 {
1354 	char fw_name[30];
1355 	int err;
1356 	struct amdgpu_firmware_info *info = NULL;
1357 	const struct common_firmware_header *header = NULL;
1358 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
1359 	unsigned int *tmp = NULL;
1360 	unsigned int i = 0;
1361 	uint16_t version_major;
1362 	uint16_t version_minor;
1363 	uint32_t smu_version;
1364 
1365 	/*
1366 	 * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
1367 	 * instead of picasso_rlc.bin.
1368 	 * Judgment method:
1369 	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1370 	 *          or revision >= 0xD8 && revision <= 0xDF
1371 	 * otherwise is PCO FP5
1372 	 */
1373 	if (!strcmp(chip_name, "picasso") &&
1374 		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1375 		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1376 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1377 	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1378 		(smu_version >= 0x41e2b))
1379 		/**
1380 		*SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
1381 		*/
1382 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1383 	else
1384 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1385 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1386 	if (err)
1387 		goto out;
1388 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
1389 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1390 
1391 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1392 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1393 	if (version_major == 2 && version_minor == 1)
1394 		adev->gfx.rlc.is_rlc_v2_1 = true;
1395 
1396 	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1397 	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1398 	adev->gfx.rlc.save_and_restore_offset =
1399 			le32_to_cpu(rlc_hdr->save_and_restore_offset);
1400 	adev->gfx.rlc.clear_state_descriptor_offset =
1401 			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1402 	adev->gfx.rlc.avail_scratch_ram_locations =
1403 			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1404 	adev->gfx.rlc.reg_restore_list_size =
1405 			le32_to_cpu(rlc_hdr->reg_restore_list_size);
1406 	adev->gfx.rlc.reg_list_format_start =
1407 			le32_to_cpu(rlc_hdr->reg_list_format_start);
1408 	adev->gfx.rlc.reg_list_format_separate_start =
1409 			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1410 	adev->gfx.rlc.starting_offsets_start =
1411 			le32_to_cpu(rlc_hdr->starting_offsets_start);
1412 	adev->gfx.rlc.reg_list_format_size_bytes =
1413 			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1414 	adev->gfx.rlc.reg_list_size_bytes =
1415 			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1416 	adev->gfx.rlc.register_list_format =
1417 			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1418 				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1419 	if (!adev->gfx.rlc.register_list_format) {
1420 		err = -ENOMEM;
1421 		goto out;
1422 	}
1423 
1424 	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1425 			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1426 	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1427 		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);
1428 
1429 	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1430 
1431 	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1432 			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1433 	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1434 		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1435 
1436 	if (adev->gfx.rlc.is_rlc_v2_1)
1437 		gfx_v9_0_init_rlc_ext_microcode(adev);
1438 
1439 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1440 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1441 		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1442 		info->fw = adev->gfx.rlc_fw;
1443 		header = (const struct common_firmware_header *)info->fw->data;
1444 		adev->firmware.fw_size +=
1445 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1446 
1447 		if (adev->gfx.rlc.is_rlc_v2_1 &&
1448 		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1449 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1450 		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1451 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1452 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1453 			info->fw = adev->gfx.rlc_fw;
1454 			adev->firmware.fw_size +=
1455 				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1456 
1457 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1458 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1459 			info->fw = adev->gfx.rlc_fw;
1460 			adev->firmware.fw_size +=
1461 				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1462 
1463 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1464 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1465 			info->fw = adev->gfx.rlc_fw;
1466 			adev->firmware.fw_size +=
1467 				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1468 		}
1469 	}
1470 
1471 out:
1472 	if (err) {
1473 		dev_err(adev->dev,
1474 			"gfx9: Failed to load firmware \"%s\"\n",
1475 			fw_name);
1476 		release_firmware(adev->gfx.rlc_fw);
1477 		adev->gfx.rlc_fw = NULL;
1478 	}
1479 	return err;
1480 }
1481 
1482 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
1483 {
1484 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
1485 	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
1486 	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))
1487 		return false;
1488 
1489 	return true;
1490 }
1491 
1492 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1493 					  const char *chip_name)
1494 {
1495 	char fw_name[30];
1496 	int err;
1497 	struct amdgpu_firmware_info *info = NULL;
1498 	const struct common_firmware_header *header = NULL;
1499 	const struct gfx_firmware_header_v1_0 *cp_hdr;
1500 
1501 	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1502 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
1503 	else
1504 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1505 
1506 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1507 	if (err)
1508 		goto out;
1509 	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1510 	if (err)
1511 		goto out;
1512 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1513 	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1514 	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1515 
1516 
1517 	if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1518 		if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
1519 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
1520 		else
1521 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1522 
1523 		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1524 		if (!err) {
1525 			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1526 			if (err)
1527 				goto out;
1528 			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1529 			adev->gfx.mec2_fw->data;
1530 			adev->gfx.mec2_fw_version =
1531 			le32_to_cpu(cp_hdr->header.ucode_version);
1532 			adev->gfx.mec2_feature_version =
1533 			le32_to_cpu(cp_hdr->ucode_feature_version);
1534 		} else {
1535 			err = 0;
1536 			adev->gfx.mec2_fw = NULL;
1537 		}
1538 	} else {
1539 		adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
1540 		adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
1541 	}
1542 
1543 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1544 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1545 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1546 		info->fw = adev->gfx.mec_fw;
1547 		header = (const struct common_firmware_header *)info->fw->data;
1548 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1549 		adev->firmware.fw_size +=
1550 			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1551 
1552 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1553 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1554 		info->fw = adev->gfx.mec_fw;
1555 		adev->firmware.fw_size +=
1556 			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1557 
1558 		if (adev->gfx.mec2_fw) {
1559 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1560 			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1561 			info->fw = adev->gfx.mec2_fw;
1562 			header = (const struct common_firmware_header *)info->fw->data;
1563 			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1564 			adev->firmware.fw_size +=
1565 				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1566 
1567 			/* TODO: Determine if MEC2 JT FW loading can be removed
1568 				 for all GFX V9 asic and above */
1569 			if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
1570 				info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1571 				info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1572 				info->fw = adev->gfx.mec2_fw;
1573 				adev->firmware.fw_size +=
1574 					ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1575 					PAGE_SIZE);
1576 			}
1577 		}
1578 	}
1579 
1580 out:
1581 	gfx_v9_0_check_if_need_gfxoff(adev);
1582 	gfx_v9_0_check_fw_write_wait(adev);
1583 	if (err) {
1584 		dev_err(adev->dev,
1585 			"gfx9: Failed to load firmware \"%s\"\n",
1586 			fw_name);
1587 		release_firmware(adev->gfx.mec_fw);
1588 		adev->gfx.mec_fw = NULL;
1589 		release_firmware(adev->gfx.mec2_fw);
1590 		adev->gfx.mec2_fw = NULL;
1591 	}
1592 	return err;
1593 }
1594 
1595 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1596 {
1597 	const char *chip_name;
1598 	int r;
1599 
1600 	DRM_DEBUG("\n");
1601 
1602 	switch (adev->ip_versions[GC_HWIP][0]) {
1603 	case IP_VERSION(9, 0, 1):
1604 		chip_name = "vega10";
1605 		break;
1606 	case IP_VERSION(9, 2, 1):
1607 		chip_name = "vega12";
1608 		break;
1609 	case IP_VERSION(9, 4, 0):
1610 		chip_name = "vega20";
1611 		break;
1612 	case IP_VERSION(9, 2, 2):
1613 	case IP_VERSION(9, 1, 0):
1614 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1615 			chip_name = "raven2";
1616 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1617 			chip_name = "picasso";
1618 		else
1619 			chip_name = "raven";
1620 		break;
1621 	case IP_VERSION(9, 4, 1):
1622 		chip_name = "arcturus";
1623 		break;
1624 	case IP_VERSION(9, 3, 0):
1625 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1626 			chip_name = "renoir";
1627 		else
1628 			chip_name = "green_sardine";
1629 		break;
1630 	case IP_VERSION(9, 4, 2):
1631 		chip_name = "aldebaran";
1632 		break;
1633 	default:
1634 		BUG();
1635 	}
1636 
1637 	/* No CPG in Arcturus */
1638 	if (adev->gfx.num_gfx_rings) {
1639 		r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1640 		if (r)
1641 			return r;
1642 	}
1643 
1644 	r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1645 	if (r)
1646 		return r;
1647 
1648 	r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1649 	if (r)
1650 		return r;
1651 
1652 	return r;
1653 }
1654 
1655 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1656 {
1657 	u32 count = 0;
1658 	const struct cs_section_def *sect = NULL;
1659 	const struct cs_extent_def *ext = NULL;
1660 
1661 	/* begin clear state */
1662 	count += 2;
1663 	/* context control state */
1664 	count += 3;
1665 
1666 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1667 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1668 			if (sect->id == SECT_CONTEXT)
1669 				count += 2 + ext->reg_count;
1670 			else
1671 				return 0;
1672 		}
1673 	}
1674 
1675 	/* end clear state */
1676 	count += 2;
1677 	/* clear state */
1678 	count += 2;
1679 
1680 	return count;
1681 }
1682 
1683 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1684 				    volatile u32 *buffer)
1685 {
1686 	u32 count = 0, i;
1687 	const struct cs_section_def *sect = NULL;
1688 	const struct cs_extent_def *ext = NULL;
1689 
1690 	if (adev->gfx.rlc.cs_data == NULL)
1691 		return;
1692 	if (buffer == NULL)
1693 		return;
1694 
1695 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1696 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1697 
1698 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1699 	buffer[count++] = cpu_to_le32(0x80000000);
1700 	buffer[count++] = cpu_to_le32(0x80000000);
1701 
1702 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1703 		for (ext = sect->section; ext->extent != NULL; ++ext) {
1704 			if (sect->id == SECT_CONTEXT) {
1705 				buffer[count++] =
1706 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1707 				buffer[count++] = cpu_to_le32(ext->reg_index -
1708 						PACKET3_SET_CONTEXT_REG_START);
1709 				for (i = 0; i < ext->reg_count; i++)
1710 					buffer[count++] = cpu_to_le32(ext->extent[i]);
1711 			} else {
1712 				return;
1713 			}
1714 		}
1715 	}
1716 
1717 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1718 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1719 
1720 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1721 	buffer[count++] = cpu_to_le32(0);
1722 }
1723 
1724 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1725 {
1726 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1727 	uint32_t pg_always_on_cu_num = 2;
1728 	uint32_t always_on_cu_num;
1729 	uint32_t i, j, k;
1730 	uint32_t mask, cu_bitmap, counter;
1731 
1732 	if (adev->flags & AMD_IS_APU)
1733 		always_on_cu_num = 4;
1734 	else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))
1735 		always_on_cu_num = 8;
1736 	else
1737 		always_on_cu_num = 12;
1738 
1739 	mutex_lock(&adev->grbm_idx_mutex);
1740 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1741 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1742 			mask = 1;
1743 			cu_bitmap = 0;
1744 			counter = 0;
1745 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1746 
1747 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
1748 				if (cu_info->bitmap[i][j] & mask) {
1749 					if (counter == pg_always_on_cu_num)
1750 						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1751 					if (counter < always_on_cu_num)
1752 						cu_bitmap |= mask;
1753 					else
1754 						break;
1755 					counter++;
1756 				}
1757 				mask <<= 1;
1758 			}
1759 
1760 			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1761 			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1762 		}
1763 	}
1764 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1765 	mutex_unlock(&adev->grbm_idx_mutex);
1766 }
1767 
1768 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1769 {
1770 	uint32_t data;
1771 
1772 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1773 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1774 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1775 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1776 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1777 
1778 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1779 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1780 
1781 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1782 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1783 
1784 	mutex_lock(&adev->grbm_idx_mutex);
1785 	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1786 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1787 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1788 
1789 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1790 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1791 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1792 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1793 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1794 
1795 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1796 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1797 	data &= 0x0000FFFF;
1798 	data |= 0x00C00000;
1799 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1800 
1801 	/*
1802 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1803 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1804 	 */
1805 
1806 	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1807 	 * but used for RLC_LB_CNTL configuration */
1808 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1809 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1810 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1811 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1812 	mutex_unlock(&adev->grbm_idx_mutex);
1813 
1814 	gfx_v9_0_init_always_on_cu_mask(adev);
1815 }
1816 
1817 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1818 {
1819 	uint32_t data;
1820 
1821 	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1822 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1823 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1824 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1825 	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1826 
1827 	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1828 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1829 
1830 	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1831 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1832 
1833 	mutex_lock(&adev->grbm_idx_mutex);
1834 	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1835 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1836 	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1837 
1838 	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
1839 	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1840 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1841 	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1842 	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1843 
1844 	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1845 	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1846 	data &= 0x0000FFFF;
1847 	data |= 0x00C00000;
1848 	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1849 
1850 	/*
1851 	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1852 	 * programmed in gfx_v9_0_init_always_on_cu_mask()
1853 	 */
1854 
1855 	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1856 	 * but used for RLC_LB_CNTL configuration */
1857 	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1858 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1859 	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1860 	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1861 	mutex_unlock(&adev->grbm_idx_mutex);
1862 
1863 	gfx_v9_0_init_always_on_cu_mask(adev);
1864 }
1865 
1866 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1867 {
1868 	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1869 }
1870 
1871 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1872 {
1873 	if (gfx_v9_0_load_mec2_fw_bin_support(adev))
1874 		return 5;
1875 	else
1876 		return 4;
1877 }
1878 
1879 static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1880 {
1881 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1882 
1883 	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
1884 	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
1885 	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
1886 	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
1887 	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
1888 	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
1889 	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
1890 	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
1891 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1892 }
1893 
1894 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1895 {
1896 	const struct cs_section_def *cs_data;
1897 	int r;
1898 
1899 	adev->gfx.rlc.cs_data = gfx9_cs_data;
1900 
1901 	cs_data = adev->gfx.rlc.cs_data;
1902 
1903 	if (cs_data) {
1904 		/* init clear state block */
1905 		r = amdgpu_gfx_rlc_init_csb(adev);
1906 		if (r)
1907 			return r;
1908 	}
1909 
1910 	if (adev->flags & AMD_IS_APU) {
1911 		/* TODO: double check the cp_table_size for RV */
1912 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1913 		r = amdgpu_gfx_rlc_init_cpt(adev);
1914 		if (r)
1915 			return r;
1916 	}
1917 
1918 	switch (adev->ip_versions[GC_HWIP][0]) {
1919 	case IP_VERSION(9, 2, 2):
1920 	case IP_VERSION(9, 1, 0):
1921 		gfx_v9_0_init_lbpw(adev);
1922 		break;
1923 	case IP_VERSION(9, 4, 0):
1924 		gfx_v9_4_init_lbpw(adev);
1925 		break;
1926 	default:
1927 		break;
1928 	}
1929 
1930 	/* init spm vmid with 0xf */
1931 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1932 		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1933 
1934 	return 0;
1935 }
1936 
1937 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1938 {
1939 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1940 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1941 }
1942 
1943 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1944 {
1945 	int r;
1946 	u32 *hpd;
1947 	const __le32 *fw_data;
1948 	unsigned fw_size;
1949 	u32 *fw;
1950 	size_t mec_hpd_size;
1951 
1952 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1953 
1954 	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1955 
1956 	/* take ownership of the relevant compute queues */
1957 	amdgpu_gfx_compute_queue_acquire(adev);
1958 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1959 	if (mec_hpd_size) {
1960 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1961 					      AMDGPU_GEM_DOMAIN_VRAM,
1962 					      &adev->gfx.mec.hpd_eop_obj,
1963 					      &adev->gfx.mec.hpd_eop_gpu_addr,
1964 					      (void **)&hpd);
1965 		if (r) {
1966 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1967 			gfx_v9_0_mec_fini(adev);
1968 			return r;
1969 		}
1970 
1971 		memset(hpd, 0, mec_hpd_size);
1972 
1973 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1974 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1975 	}
1976 
1977 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1978 
1979 	fw_data = (const __le32 *)
1980 		(adev->gfx.mec_fw->data +
1981 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1982 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1983 
1984 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1985 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1986 				      &adev->gfx.mec.mec_fw_obj,
1987 				      &adev->gfx.mec.mec_fw_gpu_addr,
1988 				      (void **)&fw);
1989 	if (r) {
1990 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1991 		gfx_v9_0_mec_fini(adev);
1992 		return r;
1993 	}
1994 
1995 	memcpy(fw, fw_data, fw_size);
1996 
1997 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1998 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1999 
2000 	return 0;
2001 }
2002 
2003 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
2004 {
2005 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2006 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2007 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2008 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
2009 		(SQ_IND_INDEX__FORCE_READ_MASK));
2010 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2011 }
2012 
2013 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2014 			   uint32_t wave, uint32_t thread,
2015 			   uint32_t regno, uint32_t num, uint32_t *out)
2016 {
2017 	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2018 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2019 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2020 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
2021 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2022 		(SQ_IND_INDEX__FORCE_READ_MASK) |
2023 		(SQ_IND_INDEX__AUTO_INCR_MASK));
2024 	while (num--)
2025 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2026 }
2027 
2028 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2029 {
2030 	/* type 1 wave data */
2031 	dst[(*no_fields)++] = 1;
2032 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2033 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2034 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2035 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2036 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2037 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2038 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2039 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2040 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2041 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2042 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2043 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2044 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2045 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2046 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
2047 }
2048 
2049 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2050 				     uint32_t wave, uint32_t start,
2051 				     uint32_t size, uint32_t *dst)
2052 {
2053 	wave_read_regs(
2054 		adev, simd, wave, 0,
2055 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2056 }
2057 
2058 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2059 				     uint32_t wave, uint32_t thread,
2060 				     uint32_t start, uint32_t size,
2061 				     uint32_t *dst)
2062 {
2063 	wave_read_regs(
2064 		adev, simd, wave, thread,
2065 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2066 }
2067 
2068 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2069 				  u32 me, u32 pipe, u32 q, u32 vm)
2070 {
2071 	soc15_grbm_select(adev, me, pipe, q, vm);
2072 }
2073 
2074 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2075         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2076         .select_se_sh = &gfx_v9_0_select_se_sh,
2077         .read_wave_data = &gfx_v9_0_read_wave_data,
2078         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2079         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2080         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2081 };
2082 
2083 const struct amdgpu_ras_block_hw_ops  gfx_v9_0_ras_ops = {
2084 		.ras_error_inject = &gfx_v9_0_ras_error_inject,
2085 		.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2086 		.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2087 };
2088 
2089 static struct amdgpu_gfx_ras gfx_v9_0_ras = {
2090 	.ras_block = {
2091 		.hw_ops = &gfx_v9_0_ras_ops,
2092 	},
2093 };
2094 
2095 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2096 {
2097 	u32 gb_addr_config;
2098 	int err;
2099 
2100 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2101 
2102 	switch (adev->ip_versions[GC_HWIP][0]) {
2103 	case IP_VERSION(9, 0, 1):
2104 		adev->gfx.config.max_hw_contexts = 8;
2105 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2106 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2107 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2108 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2109 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2110 		break;
2111 	case IP_VERSION(9, 2, 1):
2112 		adev->gfx.config.max_hw_contexts = 8;
2113 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2114 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2115 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2116 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2117 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2118 		DRM_INFO("fix gfx.config for vega12\n");
2119 		break;
2120 	case IP_VERSION(9, 4, 0):
2121 		adev->gfx.ras = &gfx_v9_0_ras;
2122 		adev->gfx.config.max_hw_contexts = 8;
2123 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2124 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2125 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2126 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2127 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2128 		gb_addr_config &= ~0xf3e777ff;
2129 		gb_addr_config |= 0x22014042;
2130 		/* check vbios table if gpu info is not available */
2131 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2132 		if (err)
2133 			return err;
2134 		break;
2135 	case IP_VERSION(9, 2, 2):
2136 	case IP_VERSION(9, 1, 0):
2137 		adev->gfx.config.max_hw_contexts = 8;
2138 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2139 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2140 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2141 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2142 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2143 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2144 		else
2145 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2146 		break;
2147 	case IP_VERSION(9, 4, 1):
2148 		adev->gfx.ras = &gfx_v9_4_ras;
2149 		adev->gfx.config.max_hw_contexts = 8;
2150 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2151 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2152 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2153 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2154 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2155 		gb_addr_config &= ~0xf3e777ff;
2156 		gb_addr_config |= 0x22014042;
2157 		break;
2158 	case IP_VERSION(9, 3, 0):
2159 		adev->gfx.config.max_hw_contexts = 8;
2160 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2161 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2162 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2163 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2164 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2165 		gb_addr_config &= ~0xf3e777ff;
2166 		gb_addr_config |= 0x22010042;
2167 		break;
2168 	case IP_VERSION(9, 4, 2):
2169 		adev->gfx.ras = &gfx_v9_4_2_ras;
2170 		adev->gfx.config.max_hw_contexts = 8;
2171 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2172 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2173 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2174 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2175 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2176 		gb_addr_config &= ~0xf3e777ff;
2177 		gb_addr_config |= 0x22014042;
2178 		/* check vbios table if gpu info is not available */
2179 		err = amdgpu_atomfirmware_get_gfx_info(adev);
2180 		if (err)
2181 			return err;
2182 		break;
2183 	default:
2184 		BUG();
2185 		break;
2186 	}
2187 
2188 	if (adev->gfx.ras) {
2189 		err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
2190 		if (err) {
2191 			DRM_ERROR("Failed to register gfx ras block!\n");
2192 			return err;
2193 		}
2194 
2195 		strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
2196 		adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
2197 		adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
2198 		adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;
2199 
2200 		/* If not define special ras_late_init function, use gfx default ras_late_init */
2201 		if (!adev->gfx.ras->ras_block.ras_late_init)
2202 			adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
2203 
2204 		/* If not defined special ras_cb function, use default ras_cb */
2205 		if (!adev->gfx.ras->ras_block.ras_cb)
2206 			adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
2207 	}
2208 
2209 	adev->gfx.config.gb_addr_config = gb_addr_config;
2210 
2211 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2212 			REG_GET_FIELD(
2213 					adev->gfx.config.gb_addr_config,
2214 					GB_ADDR_CONFIG,
2215 					NUM_PIPES);
2216 
2217 	adev->gfx.config.max_tile_pipes =
2218 		adev->gfx.config.gb_addr_config_fields.num_pipes;
2219 
2220 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2221 			REG_GET_FIELD(
2222 					adev->gfx.config.gb_addr_config,
2223 					GB_ADDR_CONFIG,
2224 					NUM_BANKS);
2225 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2226 			REG_GET_FIELD(
2227 					adev->gfx.config.gb_addr_config,
2228 					GB_ADDR_CONFIG,
2229 					MAX_COMPRESSED_FRAGS);
2230 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2231 			REG_GET_FIELD(
2232 					adev->gfx.config.gb_addr_config,
2233 					GB_ADDR_CONFIG,
2234 					NUM_RB_PER_SE);
2235 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2236 			REG_GET_FIELD(
2237 					adev->gfx.config.gb_addr_config,
2238 					GB_ADDR_CONFIG,
2239 					NUM_SHADER_ENGINES);
2240 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2241 			REG_GET_FIELD(
2242 					adev->gfx.config.gb_addr_config,
2243 					GB_ADDR_CONFIG,
2244 					PIPE_INTERLEAVE_SIZE));
2245 
2246 	return 0;
2247 }
2248 
2249 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2250 				      int mec, int pipe, int queue)
2251 {
2252 	unsigned irq_type;
2253 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2254 	unsigned int hw_prio;
2255 
2256 	ring = &adev->gfx.compute_ring[ring_id];
2257 
2258 	/* mec0 is me1 */
2259 	ring->me = mec + 1;
2260 	ring->pipe = pipe;
2261 	ring->queue = queue;
2262 
2263 	ring->ring_obj = NULL;
2264 	ring->use_doorbell = true;
2265 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2266 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2267 				+ (ring_id * GFX9_MEC_HPD_SIZE);
2268 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2269 
2270 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2271 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2272 		+ ring->pipe;
2273 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
2274 			AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
2275 	/* type-2 packets are deprecated on MEC, use type-3 instead */
2276 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
2277 				hw_prio, NULL);
2278 }
2279 
2280 static int gfx_v9_0_sw_init(void *handle)
2281 {
2282 	int i, j, k, r, ring_id;
2283 	struct amdgpu_ring *ring;
2284 	struct amdgpu_kiq *kiq;
2285 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2286 
2287 	switch (adev->ip_versions[GC_HWIP][0]) {
2288 	case IP_VERSION(9, 0, 1):
2289 	case IP_VERSION(9, 2, 1):
2290 	case IP_VERSION(9, 4, 0):
2291 	case IP_VERSION(9, 2, 2):
2292 	case IP_VERSION(9, 1, 0):
2293 	case IP_VERSION(9, 4, 1):
2294 	case IP_VERSION(9, 3, 0):
2295 	case IP_VERSION(9, 4, 2):
2296 		adev->gfx.mec.num_mec = 2;
2297 		break;
2298 	default:
2299 		adev->gfx.mec.num_mec = 1;
2300 		break;
2301 	}
2302 
2303 	adev->gfx.mec.num_pipe_per_mec = 4;
2304 	adev->gfx.mec.num_queue_per_pipe = 8;
2305 
2306 	/* EOP Event */
2307 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2308 	if (r)
2309 		return r;
2310 
2311 	/* Privileged reg */
2312 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2313 			      &adev->gfx.priv_reg_irq);
2314 	if (r)
2315 		return r;
2316 
2317 	/* Privileged inst */
2318 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2319 			      &adev->gfx.priv_inst_irq);
2320 	if (r)
2321 		return r;
2322 
2323 	/* ECC error */
2324 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2325 			      &adev->gfx.cp_ecc_error_irq);
2326 	if (r)
2327 		return r;
2328 
2329 	/* FUE error */
2330 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2331 			      &adev->gfx.cp_ecc_error_irq);
2332 	if (r)
2333 		return r;
2334 
2335 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2336 
2337 	r = gfx_v9_0_init_microcode(adev);
2338 	if (r) {
2339 		DRM_ERROR("Failed to load gfx firmware!\n");
2340 		return r;
2341 	}
2342 
2343 	if (adev->gfx.rlc.funcs) {
2344 		if (adev->gfx.rlc.funcs->init) {
2345 			r = adev->gfx.rlc.funcs->init(adev);
2346 			if (r) {
2347 				dev_err(adev->dev, "Failed to init rlc BOs!\n");
2348 				return r;
2349 			}
2350 		}
2351 	}
2352 
2353 	r = gfx_v9_0_mec_init(adev);
2354 	if (r) {
2355 		DRM_ERROR("Failed to init MEC BOs!\n");
2356 		return r;
2357 	}
2358 
2359 	/* set up the gfx ring */
2360 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2361 		ring = &adev->gfx.gfx_ring[i];
2362 		ring->ring_obj = NULL;
2363 		if (!i)
2364 			sprintf(ring->name, "gfx");
2365 		else
2366 			sprintf(ring->name, "gfx_%d", i);
2367 		ring->use_doorbell = true;
2368 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2369 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2370 				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2371 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
2372 		if (r)
2373 			return r;
2374 	}
2375 
2376 	/* set up the compute queues - allocate horizontally across pipes */
2377 	ring_id = 0;
2378 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2379 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2380 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2381 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2382 					continue;
2383 
2384 				r = gfx_v9_0_compute_ring_init(adev,
2385 							       ring_id,
2386 							       i, k, j);
2387 				if (r)
2388 					return r;
2389 
2390 				ring_id++;
2391 			}
2392 		}
2393 	}
2394 
2395 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2396 	if (r) {
2397 		DRM_ERROR("Failed to init KIQ BOs!\n");
2398 		return r;
2399 	}
2400 
2401 	kiq = &adev->gfx.kiq;
2402 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2403 	if (r)
2404 		return r;
2405 
2406 	/* create MQD for all compute queues as wel as KIQ for SRIOV case */
2407 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2408 	if (r)
2409 		return r;
2410 
2411 	adev->gfx.ce_ram_size = 0x8000;
2412 
2413 	r = gfx_v9_0_gpu_early_init(adev);
2414 	if (r)
2415 		return r;
2416 
2417 	return 0;
2418 }
2419 
2420 
2421 static int gfx_v9_0_sw_fini(void *handle)
2422 {
2423 	int i;
2424 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2425 
2426 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2427 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2428 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
2429 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2430 
2431 	amdgpu_gfx_mqd_sw_fini(adev);
2432 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2433 	amdgpu_gfx_kiq_fini(adev);
2434 
2435 	gfx_v9_0_mec_fini(adev);
2436 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2437 				&adev->gfx.rlc.clear_state_gpu_addr,
2438 				(void **)&adev->gfx.rlc.cs_ptr);
2439 	if (adev->flags & AMD_IS_APU) {
2440 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2441 				&adev->gfx.rlc.cp_table_gpu_addr,
2442 				(void **)&adev->gfx.rlc.cp_table_ptr);
2443 	}
2444 	gfx_v9_0_free_microcode(adev);
2445 
2446 	return 0;
2447 }
2448 
2449 
2450 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2451 {
2452 	/* TODO */
2453 }
2454 
2455 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2456 			   u32 instance)
2457 {
2458 	u32 data;
2459 
2460 	if (instance == 0xffffffff)
2461 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2462 	else
2463 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2464 
2465 	if (se_num == 0xffffffff)
2466 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2467 	else
2468 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2469 
2470 	if (sh_num == 0xffffffff)
2471 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2472 	else
2473 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2474 
2475 	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2476 }
2477 
2478 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2479 {
2480 	u32 data, mask;
2481 
2482 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2483 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2484 
2485 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2486 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2487 
2488 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2489 					 adev->gfx.config.max_sh_per_se);
2490 
2491 	return (~data) & mask;
2492 }
2493 
2494 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2495 {
2496 	int i, j;
2497 	u32 data;
2498 	u32 active_rbs = 0;
2499 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2500 					adev->gfx.config.max_sh_per_se;
2501 
2502 	mutex_lock(&adev->grbm_idx_mutex);
2503 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2504 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2505 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2506 			data = gfx_v9_0_get_rb_active_bitmap(adev);
2507 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2508 					       rb_bitmap_width_per_sh);
2509 		}
2510 	}
2511 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2512 	mutex_unlock(&adev->grbm_idx_mutex);
2513 
2514 	adev->gfx.config.backend_enable_mask = active_rbs;
2515 	adev->gfx.config.num_rbs = hweight32(active_rbs);
2516 }
2517 
2518 #define DEFAULT_SH_MEM_BASES	(0x6000)
2519 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2520 {
2521 	int i;
2522 	uint32_t sh_mem_config;
2523 	uint32_t sh_mem_bases;
2524 
2525 	/*
2526 	 * Configure apertures:
2527 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2528 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2529 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2530 	 */
2531 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2532 
2533 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2534 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2535 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2536 
2537 	mutex_lock(&adev->srbm_mutex);
2538 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2539 		soc15_grbm_select(adev, 0, 0, 0, i);
2540 		/* CP and shaders */
2541 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2542 		WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2543 	}
2544 	soc15_grbm_select(adev, 0, 0, 0, 0);
2545 	mutex_unlock(&adev->srbm_mutex);
2546 
2547 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
2548 	   access. These should be enabled by FW for target VMIDs. */
2549 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2550 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2551 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2552 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2553 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2554 	}
2555 }
2556 
2557 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2558 {
2559 	int vmid;
2560 
2561 	/*
2562 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2563 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
2564 	 * the driver can enable them for graphics. VMID0 should maintain
2565 	 * access so that HWS firmware can save/restore entries.
2566 	 */
2567 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2568 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2569 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2570 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2571 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2572 	}
2573 }
2574 
2575 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2576 {
2577 	uint32_t tmp;
2578 
2579 	switch (adev->ip_versions[GC_HWIP][0]) {
2580 	case IP_VERSION(9, 4, 1):
2581 		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2582 		tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2583 					DISABLE_BARRIER_WAITCNT, 1);
2584 		WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2585 		break;
2586 	default:
2587 		break;
2588 	}
2589 }
2590 
2591 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2592 {
2593 	u32 tmp;
2594 	int i;
2595 
2596 	WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2597 
2598 	gfx_v9_0_tiling_mode_table_init(adev);
2599 
2600 	if (adev->gfx.num_gfx_rings)
2601 		gfx_v9_0_setup_rb(adev);
2602 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2603 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2604 
2605 	/* XXX SH_MEM regs */
2606 	/* where to put LDS, scratch, GPUVM in FSA64 space */
2607 	mutex_lock(&adev->srbm_mutex);
2608 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2609 		soc15_grbm_select(adev, 0, 0, 0, i);
2610 		/* CP and shaders */
2611 		if (i == 0) {
2612 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2613 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2614 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2615 					    !!adev->gmc.noretry);
2616 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2617 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2618 		} else {
2619 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2620 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2621 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2622 					    !!adev->gmc.noretry);
2623 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2624 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2625 				(adev->gmc.private_aperture_start >> 48));
2626 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2627 				(adev->gmc.shared_aperture_start >> 48));
2628 			WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2629 		}
2630 	}
2631 	soc15_grbm_select(adev, 0, 0, 0, 0);
2632 
2633 	mutex_unlock(&adev->srbm_mutex);
2634 
2635 	gfx_v9_0_init_compute_vmid(adev);
2636 	gfx_v9_0_init_gds_vmid(adev);
2637 	gfx_v9_0_init_sq_config(adev);
2638 }
2639 
2640 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2641 {
2642 	u32 i, j, k;
2643 	u32 mask;
2644 
2645 	mutex_lock(&adev->grbm_idx_mutex);
2646 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2647 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2648 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2649 			for (k = 0; k < adev->usec_timeout; k++) {
2650 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2651 					break;
2652 				udelay(1);
2653 			}
2654 			if (k == adev->usec_timeout) {
2655 				gfx_v9_0_select_se_sh(adev, 0xffffffff,
2656 						      0xffffffff, 0xffffffff);
2657 				mutex_unlock(&adev->grbm_idx_mutex);
2658 				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2659 					 i, j);
2660 				return;
2661 			}
2662 		}
2663 	}
2664 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2665 	mutex_unlock(&adev->grbm_idx_mutex);
2666 
2667 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2668 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2669 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2670 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2671 	for (k = 0; k < adev->usec_timeout; k++) {
2672 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2673 			break;
2674 		udelay(1);
2675 	}
2676 }
2677 
2678 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2679 					       bool enable)
2680 {
2681 	u32 tmp;
2682 
2683 	/* These interrupts should be enabled to drive DS clock */
2684 
2685 	tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2686 
2687 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2688 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2689 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2690 	if(adev->gfx.num_gfx_rings)
2691 		tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2692 
2693 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2694 }
2695 
2696 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2697 {
2698 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2699 	/* csib */
2700 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2701 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2702 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2703 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2704 	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2705 			adev->gfx.rlc.clear_state_size);
2706 }
2707 
2708 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2709 				int indirect_offset,
2710 				int list_size,
2711 				int *unique_indirect_regs,
2712 				int unique_indirect_reg_count,
2713 				int *indirect_start_offsets,
2714 				int *indirect_start_offsets_count,
2715 				int max_start_offsets_count)
2716 {
2717 	int idx;
2718 
2719 	for (; indirect_offset < list_size; indirect_offset++) {
2720 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2721 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2722 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2723 
2724 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2725 			indirect_offset += 2;
2726 
2727 			/* look for the matching indice */
2728 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2729 				if (unique_indirect_regs[idx] ==
2730 					register_list_format[indirect_offset] ||
2731 					!unique_indirect_regs[idx])
2732 					break;
2733 			}
2734 
2735 			BUG_ON(idx >= unique_indirect_reg_count);
2736 
2737 			if (!unique_indirect_regs[idx])
2738 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2739 
2740 			indirect_offset++;
2741 		}
2742 	}
2743 }
2744 
2745 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2746 {
2747 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2748 	int unique_indirect_reg_count = 0;
2749 
2750 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2751 	int indirect_start_offsets_count = 0;
2752 
2753 	int list_size = 0;
2754 	int i = 0, j = 0;
2755 	u32 tmp = 0;
2756 
2757 	u32 *register_list_format =
2758 		kmemdup(adev->gfx.rlc.register_list_format,
2759 			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2760 	if (!register_list_format)
2761 		return -ENOMEM;
2762 
2763 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2764 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2765 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2766 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2767 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2768 				    unique_indirect_regs,
2769 				    unique_indirect_reg_count,
2770 				    indirect_start_offsets,
2771 				    &indirect_start_offsets_count,
2772 				    ARRAY_SIZE(indirect_start_offsets));
2773 
2774 	/* enable auto inc in case it is disabled */
2775 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2776 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2777 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2778 
2779 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2780 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2781 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2782 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2783 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2784 			adev->gfx.rlc.register_restore[i]);
2785 
2786 	/* load indirect register */
2787 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2788 		adev->gfx.rlc.reg_list_format_start);
2789 
2790 	/* direct register portion */
2791 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2792 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2793 			register_list_format[i]);
2794 
2795 	/* indirect register portion */
2796 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2797 		if (register_list_format[i] == 0xFFFFFFFF) {
2798 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2799 			continue;
2800 		}
2801 
2802 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2803 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2804 
2805 		for (j = 0; j < unique_indirect_reg_count; j++) {
2806 			if (register_list_format[i] == unique_indirect_regs[j]) {
2807 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2808 				break;
2809 			}
2810 		}
2811 
2812 		BUG_ON(j >= unique_indirect_reg_count);
2813 
2814 		i++;
2815 	}
2816 
2817 	/* set save/restore list size */
2818 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2819 	list_size = list_size >> 1;
2820 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2821 		adev->gfx.rlc.reg_restore_list_size);
2822 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2823 
2824 	/* write the starting offsets to RLC scratch ram */
2825 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2826 		adev->gfx.rlc.starting_offsets_start);
2827 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2828 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2829 		       indirect_start_offsets[i]);
2830 
2831 	/* load unique indirect regs*/
2832 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2833 		if (unique_indirect_regs[i] != 0) {
2834 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2835 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2836 			       unique_indirect_regs[i] & 0x3FFFF);
2837 
2838 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2839 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2840 			       unique_indirect_regs[i] >> 20);
2841 		}
2842 	}
2843 
2844 	kfree(register_list_format);
2845 	return 0;
2846 }
2847 
2848 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2849 {
2850 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2851 }
2852 
2853 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2854 					     bool enable)
2855 {
2856 	uint32_t data = 0;
2857 	uint32_t default_data = 0;
2858 
2859 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2860 	if (enable) {
2861 		/* enable GFXIP control over CGPG */
2862 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2863 		if(default_data != data)
2864 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2865 
2866 		/* update status */
2867 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2868 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2869 		if(default_data != data)
2870 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2871 	} else {
2872 		/* restore GFXIP control over GCPG */
2873 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2874 		if(default_data != data)
2875 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2876 	}
2877 }
2878 
2879 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2880 {
2881 	uint32_t data = 0;
2882 
2883 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2884 			      AMD_PG_SUPPORT_GFX_SMG |
2885 			      AMD_PG_SUPPORT_GFX_DMG)) {
2886 		/* init IDLE_POLL_COUNT = 60 */
2887 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2888 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2889 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2890 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2891 
2892 		/* init RLC PG Delay */
2893 		data = 0;
2894 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2895 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2896 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2897 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2898 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2899 
2900 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2901 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2902 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2903 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2904 
2905 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2906 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2907 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2908 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2909 
2910 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2911 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2912 
2913 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2914 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2915 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2916 		if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))
2917 			pwr_10_0_gfxip_control_over_cgpg(adev, true);
2918 	}
2919 }
2920 
2921 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2922 						bool enable)
2923 {
2924 	uint32_t data = 0;
2925 	uint32_t default_data = 0;
2926 
2927 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2928 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2929 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2930 			     enable ? 1 : 0);
2931 	if (default_data != data)
2932 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2933 }
2934 
2935 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2936 						bool enable)
2937 {
2938 	uint32_t data = 0;
2939 	uint32_t default_data = 0;
2940 
2941 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2942 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2943 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2944 			     enable ? 1 : 0);
2945 	if(default_data != data)
2946 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2947 }
2948 
2949 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2950 					bool enable)
2951 {
2952 	uint32_t data = 0;
2953 	uint32_t default_data = 0;
2954 
2955 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2956 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2957 			     CP_PG_DISABLE,
2958 			     enable ? 0 : 1);
2959 	if(default_data != data)
2960 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2961 }
2962 
2963 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2964 						bool enable)
2965 {
2966 	uint32_t data, default_data;
2967 
2968 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2969 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2970 			     GFX_POWER_GATING_ENABLE,
2971 			     enable ? 1 : 0);
2972 	if(default_data != data)
2973 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2974 }
2975 
2976 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2977 						bool enable)
2978 {
2979 	uint32_t data, default_data;
2980 
2981 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2982 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2983 			     GFX_PIPELINE_PG_ENABLE,
2984 			     enable ? 1 : 0);
2985 	if(default_data != data)
2986 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2987 
2988 	if (!enable)
2989 		/* read any GFX register to wake up GFX */
2990 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2991 }
2992 
2993 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2994 						       bool enable)
2995 {
2996 	uint32_t data, default_data;
2997 
2998 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2999 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3000 			     STATIC_PER_CU_PG_ENABLE,
3001 			     enable ? 1 : 0);
3002 	if(default_data != data)
3003 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3004 }
3005 
3006 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
3007 						bool enable)
3008 {
3009 	uint32_t data, default_data;
3010 
3011 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
3012 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
3013 			     DYN_PER_CU_PG_ENABLE,
3014 			     enable ? 1 : 0);
3015 	if(default_data != data)
3016 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
3017 }
3018 
3019 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
3020 {
3021 	gfx_v9_0_init_csb(adev);
3022 
3023 	/*
3024 	 * Rlc save restore list is workable since v2_1.
3025 	 * And it's needed by gfxoff feature.
3026 	 */
3027 	if (adev->gfx.rlc.is_rlc_v2_1) {
3028 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||
3029 		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
3030 			gfx_v9_1_init_rlc_save_restore_list(adev);
3031 		gfx_v9_0_enable_save_restore_machine(adev);
3032 	}
3033 
3034 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3035 			      AMD_PG_SUPPORT_GFX_SMG |
3036 			      AMD_PG_SUPPORT_GFX_DMG |
3037 			      AMD_PG_SUPPORT_CP |
3038 			      AMD_PG_SUPPORT_GDS |
3039 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
3040 		WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
3041 			     adev->gfx.rlc.cp_table_gpu_addr >> 8);
3042 		gfx_v9_0_init_gfx_power_gating(adev);
3043 	}
3044 }
3045 
3046 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3047 {
3048 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3049 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3050 	gfx_v9_0_wait_for_rlc_serdes(adev);
3051 }
3052 
3053 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3054 {
3055 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3056 	udelay(50);
3057 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3058 	udelay(50);
3059 }
3060 
3061 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3062 {
3063 #ifdef AMDGPU_RLC_DEBUG_RETRY
3064 	u32 rlc_ucode_ver;
3065 #endif
3066 
3067 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3068 	udelay(50);
3069 
3070 	/* carrizo do enable cp interrupt after cp inited */
3071 	if (!(adev->flags & AMD_IS_APU)) {
3072 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3073 		udelay(50);
3074 	}
3075 
3076 #ifdef AMDGPU_RLC_DEBUG_RETRY
3077 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
3078 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3079 	if(rlc_ucode_ver == 0x108) {
3080 		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
3081 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
3082 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3083 		 * default is 0x9C4 to create a 100us interval */
3084 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3085 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3086 		 * to disable the page fault retry interrupts, default is
3087 		 * 0x100 (256) */
3088 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3089 	}
3090 #endif
3091 }
3092 
3093 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3094 {
3095 	const struct rlc_firmware_header_v2_0 *hdr;
3096 	const __le32 *fw_data;
3097 	unsigned i, fw_size;
3098 
3099 	if (!adev->gfx.rlc_fw)
3100 		return -EINVAL;
3101 
3102 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3103 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3104 
3105 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3106 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3107 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3108 
3109 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3110 			RLCG_UCODE_LOADING_START_ADDRESS);
3111 	for (i = 0; i < fw_size; i++)
3112 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3113 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3114 
3115 	return 0;
3116 }
3117 
3118 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3119 {
3120 	int r;
3121 
3122 	if (amdgpu_sriov_vf(adev)) {
3123 		gfx_v9_0_init_csb(adev);
3124 		return 0;
3125 	}
3126 
3127 	adev->gfx.rlc.funcs->stop(adev);
3128 
3129 	/* disable CG */
3130 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3131 
3132 	gfx_v9_0_init_pg(adev);
3133 
3134 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3135 		/* legacy rlc firmware loading */
3136 		r = gfx_v9_0_rlc_load_microcode(adev);
3137 		if (r)
3138 			return r;
3139 	}
3140 
3141 	switch (adev->ip_versions[GC_HWIP][0]) {
3142 	case IP_VERSION(9, 2, 2):
3143 	case IP_VERSION(9, 1, 0):
3144 		if (amdgpu_lbpw == 0)
3145 			gfx_v9_0_enable_lbpw(adev, false);
3146 		else
3147 			gfx_v9_0_enable_lbpw(adev, true);
3148 		break;
3149 	case IP_VERSION(9, 4, 0):
3150 		if (amdgpu_lbpw > 0)
3151 			gfx_v9_0_enable_lbpw(adev, true);
3152 		else
3153 			gfx_v9_0_enable_lbpw(adev, false);
3154 		break;
3155 	default:
3156 		break;
3157 	}
3158 
3159 	adev->gfx.rlc.funcs->start(adev);
3160 
3161 	return 0;
3162 }
3163 
3164 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3165 {
3166 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3167 
3168 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3169 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3170 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3171 	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3172 	udelay(50);
3173 }
3174 
3175 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3176 {
3177 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
3178 	const struct gfx_firmware_header_v1_0 *ce_hdr;
3179 	const struct gfx_firmware_header_v1_0 *me_hdr;
3180 	const __le32 *fw_data;
3181 	unsigned i, fw_size;
3182 
3183 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3184 		return -EINVAL;
3185 
3186 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3187 		adev->gfx.pfp_fw->data;
3188 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3189 		adev->gfx.ce_fw->data;
3190 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
3191 		adev->gfx.me_fw->data;
3192 
3193 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3194 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3195 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3196 
3197 	gfx_v9_0_cp_gfx_enable(adev, false);
3198 
3199 	/* PFP */
3200 	fw_data = (const __le32 *)
3201 		(adev->gfx.pfp_fw->data +
3202 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3203 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3204 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3205 	for (i = 0; i < fw_size; i++)
3206 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3207 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3208 
3209 	/* CE */
3210 	fw_data = (const __le32 *)
3211 		(adev->gfx.ce_fw->data +
3212 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3213 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3214 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3215 	for (i = 0; i < fw_size; i++)
3216 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3217 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3218 
3219 	/* ME */
3220 	fw_data = (const __le32 *)
3221 		(adev->gfx.me_fw->data +
3222 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3223 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3224 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3225 	for (i = 0; i < fw_size; i++)
3226 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3227 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3228 
3229 	return 0;
3230 }
3231 
3232 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3233 {
3234 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3235 	const struct cs_section_def *sect = NULL;
3236 	const struct cs_extent_def *ext = NULL;
3237 	int r, i, tmp;
3238 
3239 	/* init the CP */
3240 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3241 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3242 
3243 	gfx_v9_0_cp_gfx_enable(adev, true);
3244 
3245 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3246 	if (r) {
3247 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3248 		return r;
3249 	}
3250 
3251 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3252 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3253 
3254 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3255 	amdgpu_ring_write(ring, 0x80000000);
3256 	amdgpu_ring_write(ring, 0x80000000);
3257 
3258 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3259 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3260 			if (sect->id == SECT_CONTEXT) {
3261 				amdgpu_ring_write(ring,
3262 				       PACKET3(PACKET3_SET_CONTEXT_REG,
3263 					       ext->reg_count));
3264 				amdgpu_ring_write(ring,
3265 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3266 				for (i = 0; i < ext->reg_count; i++)
3267 					amdgpu_ring_write(ring, ext->extent[i]);
3268 			}
3269 		}
3270 	}
3271 
3272 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3273 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3274 
3275 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3276 	amdgpu_ring_write(ring, 0);
3277 
3278 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3279 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3280 	amdgpu_ring_write(ring, 0x8000);
3281 	amdgpu_ring_write(ring, 0x8000);
3282 
3283 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1));
3284 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3285 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3286 	amdgpu_ring_write(ring, tmp);
3287 	amdgpu_ring_write(ring, 0);
3288 
3289 	amdgpu_ring_commit(ring);
3290 
3291 	return 0;
3292 }
3293 
3294 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3295 {
3296 	struct amdgpu_ring *ring;
3297 	u32 tmp;
3298 	u32 rb_bufsz;
3299 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
3300 
3301 	/* Set the write pointer delay */
3302 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3303 
3304 	/* set the RB to use vmid 0 */
3305 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3306 
3307 	/* Set ring buffer size */
3308 	ring = &adev->gfx.gfx_ring[0];
3309 	rb_bufsz = order_base_2(ring->ring_size / 8);
3310 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3311 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3312 #ifdef __BIG_ENDIAN
3313 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3314 #endif
3315 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3316 
3317 	/* Initialize the ring buffer's write pointers */
3318 	ring->wptr = 0;
3319 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3320 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3321 
3322 	/* set the wb address wether it's enabled or not */
3323 	rptr_addr = ring->rptr_gpu_addr;
3324 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3325 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3326 
3327 	wptr_gpu_addr = ring->wptr_gpu_addr;
3328 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3329 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3330 
3331 	mdelay(1);
3332 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3333 
3334 	rb_addr = ring->gpu_addr >> 8;
3335 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3336 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3337 
3338 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3339 	if (ring->use_doorbell) {
3340 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3341 				    DOORBELL_OFFSET, ring->doorbell_index);
3342 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3343 				    DOORBELL_EN, 1);
3344 	} else {
3345 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3346 	}
3347 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3348 
3349 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3350 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
3351 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3352 
3353 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3354 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3355 
3356 
3357 	/* start the ring */
3358 	gfx_v9_0_cp_gfx_start(adev);
3359 	ring->sched.ready = true;
3360 
3361 	return 0;
3362 }
3363 
3364 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3365 {
3366 	if (enable) {
3367 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3368 	} else {
3369 		WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3370 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3371 		adev->gfx.kiq.ring.sched.ready = false;
3372 	}
3373 	udelay(50);
3374 }
3375 
3376 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3377 {
3378 	const struct gfx_firmware_header_v1_0 *mec_hdr;
3379 	const __le32 *fw_data;
3380 	unsigned i;
3381 	u32 tmp;
3382 
3383 	if (!adev->gfx.mec_fw)
3384 		return -EINVAL;
3385 
3386 	gfx_v9_0_cp_compute_enable(adev, false);
3387 
3388 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3389 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3390 
3391 	fw_data = (const __le32 *)
3392 		(adev->gfx.mec_fw->data +
3393 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3394 	tmp = 0;
3395 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3396 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3397 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3398 
3399 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3400 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3401 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3402 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3403 
3404 	/* MEC1 */
3405 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3406 			 mec_hdr->jt_offset);
3407 	for (i = 0; i < mec_hdr->jt_size; i++)
3408 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3409 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3410 
3411 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3412 			adev->gfx.mec_fw_version);
3413 	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3414 
3415 	return 0;
3416 }
3417 
3418 /* KIQ functions */
3419 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3420 {
3421 	uint32_t tmp;
3422 	struct amdgpu_device *adev = ring->adev;
3423 
3424 	/* tell RLC which is KIQ queue */
3425 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3426 	tmp &= 0xffffff00;
3427 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3428 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3429 	tmp |= 0x80;
3430 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3431 }
3432 
3433 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3434 {
3435 	struct amdgpu_device *adev = ring->adev;
3436 
3437 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3438 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
3439 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3440 			mqd->cp_hqd_queue_priority =
3441 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3442 		}
3443 	}
3444 }
3445 
3446 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3447 {
3448 	struct amdgpu_device *adev = ring->adev;
3449 	struct v9_mqd *mqd = ring->mqd_ptr;
3450 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3451 	uint32_t tmp;
3452 
3453 	mqd->header = 0xC0310800;
3454 	mqd->compute_pipelinestat_enable = 0x00000001;
3455 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3456 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3457 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3458 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3459 	mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3460 	mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3461 	mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3462 	mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3463 	mqd->compute_misc_reserved = 0x00000003;
3464 
3465 	mqd->dynamic_cu_mask_addr_lo =
3466 		lower_32_bits(ring->mqd_gpu_addr
3467 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3468 	mqd->dynamic_cu_mask_addr_hi =
3469 		upper_32_bits(ring->mqd_gpu_addr
3470 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3471 
3472 	eop_base_addr = ring->eop_gpu_addr >> 8;
3473 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3474 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3475 
3476 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3477 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3478 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3479 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3480 
3481 	mqd->cp_hqd_eop_control = tmp;
3482 
3483 	/* enable doorbell? */
3484 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3485 
3486 	if (ring->use_doorbell) {
3487 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3488 				    DOORBELL_OFFSET, ring->doorbell_index);
3489 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3490 				    DOORBELL_EN, 1);
3491 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3492 				    DOORBELL_SOURCE, 0);
3493 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3494 				    DOORBELL_HIT, 0);
3495 	} else {
3496 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3497 					 DOORBELL_EN, 0);
3498 	}
3499 
3500 	mqd->cp_hqd_pq_doorbell_control = tmp;
3501 
3502 	/* disable the queue if it's active */
3503 	ring->wptr = 0;
3504 	mqd->cp_hqd_dequeue_request = 0;
3505 	mqd->cp_hqd_pq_rptr = 0;
3506 	mqd->cp_hqd_pq_wptr_lo = 0;
3507 	mqd->cp_hqd_pq_wptr_hi = 0;
3508 
3509 	/* set the pointer to the MQD */
3510 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3511 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3512 
3513 	/* set MQD vmid to 0 */
3514 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3515 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3516 	mqd->cp_mqd_control = tmp;
3517 
3518 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3519 	hqd_gpu_addr = ring->gpu_addr >> 8;
3520 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3521 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3522 
3523 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3524 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3525 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3526 			    (order_base_2(ring->ring_size / 4) - 1));
3527 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3528 			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3529 #ifdef __BIG_ENDIAN
3530 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3531 #endif
3532 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3533 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3534 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3535 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3536 	mqd->cp_hqd_pq_control = tmp;
3537 
3538 	/* set the wb address whether it's enabled or not */
3539 	wb_gpu_addr = ring->rptr_gpu_addr;
3540 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3541 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3542 		upper_32_bits(wb_gpu_addr) & 0xffff;
3543 
3544 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3545 	wb_gpu_addr = ring->wptr_gpu_addr;
3546 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3547 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3548 
3549 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3550 	ring->wptr = 0;
3551 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3552 
3553 	/* set the vmid for the queue */
3554 	mqd->cp_hqd_vmid = 0;
3555 
3556 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3557 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3558 	mqd->cp_hqd_persistent_state = tmp;
3559 
3560 	/* set MIN_IB_AVAIL_SIZE */
3561 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3562 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3563 	mqd->cp_hqd_ib_control = tmp;
3564 
3565 	/* set static priority for a queue/ring */
3566 	gfx_v9_0_mqd_set_priority(ring, mqd);
3567 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3568 
3569 	/* map_queues packet doesn't need activate the queue,
3570 	 * so only kiq need set this field.
3571 	 */
3572 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3573 		mqd->cp_hqd_active = 1;
3574 
3575 	return 0;
3576 }
3577 
3578 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3579 {
3580 	struct amdgpu_device *adev = ring->adev;
3581 	struct v9_mqd *mqd = ring->mqd_ptr;
3582 	int j;
3583 
3584 	/* disable wptr polling */
3585 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3586 
3587 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3588 	       mqd->cp_hqd_eop_base_addr_lo);
3589 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3590 	       mqd->cp_hqd_eop_base_addr_hi);
3591 
3592 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3593 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3594 	       mqd->cp_hqd_eop_control);
3595 
3596 	/* enable doorbell? */
3597 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3598 	       mqd->cp_hqd_pq_doorbell_control);
3599 
3600 	/* disable the queue if it's active */
3601 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3602 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3603 		for (j = 0; j < adev->usec_timeout; j++) {
3604 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3605 				break;
3606 			udelay(1);
3607 		}
3608 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3609 		       mqd->cp_hqd_dequeue_request);
3610 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3611 		       mqd->cp_hqd_pq_rptr);
3612 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3613 		       mqd->cp_hqd_pq_wptr_lo);
3614 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3615 		       mqd->cp_hqd_pq_wptr_hi);
3616 	}
3617 
3618 	/* set the pointer to the MQD */
3619 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3620 	       mqd->cp_mqd_base_addr_lo);
3621 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3622 	       mqd->cp_mqd_base_addr_hi);
3623 
3624 	/* set MQD vmid to 0 */
3625 	WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3626 	       mqd->cp_mqd_control);
3627 
3628 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3629 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3630 	       mqd->cp_hqd_pq_base_lo);
3631 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3632 	       mqd->cp_hqd_pq_base_hi);
3633 
3634 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3635 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3636 	       mqd->cp_hqd_pq_control);
3637 
3638 	/* set the wb address whether it's enabled or not */
3639 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3640 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3641 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3642 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3643 
3644 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3645 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3646 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3647 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3648 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3649 
3650 	/* enable the doorbell if requested */
3651 	if (ring->use_doorbell) {
3652 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3653 					(adev->doorbell_index.kiq * 2) << 2);
3654 		/* If GC has entered CGPG, ringing doorbell > first page
3655 		 * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
3656 		 * workaround this issue. And this change has to align with firmware
3657 		 * update.
3658 		 */
3659 		if (check_if_enlarge_doorbell_range(adev))
3660 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3661 					(adev->doorbell.size - 4));
3662 		else
3663 			WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3664 					(adev->doorbell_index.userqueue_end * 2) << 2);
3665 	}
3666 
3667 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3668 	       mqd->cp_hqd_pq_doorbell_control);
3669 
3670 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3671 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3672 	       mqd->cp_hqd_pq_wptr_lo);
3673 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3674 	       mqd->cp_hqd_pq_wptr_hi);
3675 
3676 	/* set the vmid for the queue */
3677 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3678 
3679 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3680 	       mqd->cp_hqd_persistent_state);
3681 
3682 	/* activate the queue */
3683 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3684 	       mqd->cp_hqd_active);
3685 
3686 	if (ring->use_doorbell)
3687 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3688 
3689 	return 0;
3690 }
3691 
3692 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3693 {
3694 	struct amdgpu_device *adev = ring->adev;
3695 	int j;
3696 
3697 	/* disable the queue if it's active */
3698 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3699 
3700 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3701 
3702 		for (j = 0; j < adev->usec_timeout; j++) {
3703 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3704 				break;
3705 			udelay(1);
3706 		}
3707 
3708 		if (j == AMDGPU_MAX_USEC_TIMEOUT) {
3709 			DRM_DEBUG("KIQ dequeue request failed.\n");
3710 
3711 			/* Manual disable if dequeue request times out */
3712 			WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3713 		}
3714 
3715 		WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3716 		      0);
3717 	}
3718 
3719 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3720 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3721 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3722 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3723 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3724 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3725 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3726 	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3727 
3728 	return 0;
3729 }
3730 
3731 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3732 {
3733 	struct amdgpu_device *adev = ring->adev;
3734 	struct v9_mqd *mqd = ring->mqd_ptr;
3735 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3736 	struct v9_mqd *tmp_mqd;
3737 
3738 	gfx_v9_0_kiq_setting(ring);
3739 
3740 	/* GPU could be in bad state during probe, driver trigger the reset
3741 	 * after load the SMU, in this case , the mqd is not be initialized.
3742 	 * driver need to re-init the mqd.
3743 	 * check mqd->cp_hqd_pq_control since this value should not be 0
3744 	 */
3745 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3746 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){
3747 		/* for GPU_RESET case , reset MQD to a clean status */
3748 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3749 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3750 
3751 		/* reset ring buffer */
3752 		ring->wptr = 0;
3753 		amdgpu_ring_clear_ring(ring);
3754 
3755 		mutex_lock(&adev->srbm_mutex);
3756 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3757 		gfx_v9_0_kiq_init_register(ring);
3758 		soc15_grbm_select(adev, 0, 0, 0, 0);
3759 		mutex_unlock(&adev->srbm_mutex);
3760 	} else {
3761 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3762 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3763 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3764 		mutex_lock(&adev->srbm_mutex);
3765 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3766 		gfx_v9_0_mqd_init(ring);
3767 		gfx_v9_0_kiq_init_register(ring);
3768 		soc15_grbm_select(adev, 0, 0, 0, 0);
3769 		mutex_unlock(&adev->srbm_mutex);
3770 
3771 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3772 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3773 	}
3774 
3775 	return 0;
3776 }
3777 
3778 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3779 {
3780 	struct amdgpu_device *adev = ring->adev;
3781 	struct v9_mqd *mqd = ring->mqd_ptr;
3782 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3783 	struct v9_mqd *tmp_mqd;
3784 
3785 	/* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control
3786 	 * is not be initialized before
3787 	 */
3788 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
3789 
3790 	if (!tmp_mqd->cp_hqd_pq_control ||
3791 	    (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
3792 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3793 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3794 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3795 		mutex_lock(&adev->srbm_mutex);
3796 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3797 		gfx_v9_0_mqd_init(ring);
3798 		soc15_grbm_select(adev, 0, 0, 0, 0);
3799 		mutex_unlock(&adev->srbm_mutex);
3800 
3801 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3802 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3803 	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3804 		/* reset MQD to a clean status */
3805 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3806 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3807 
3808 		/* reset ring buffer */
3809 		ring->wptr = 0;
3810 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
3811 		amdgpu_ring_clear_ring(ring);
3812 	} else {
3813 		amdgpu_ring_clear_ring(ring);
3814 	}
3815 
3816 	return 0;
3817 }
3818 
3819 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3820 {
3821 	struct amdgpu_ring *ring;
3822 	int r;
3823 
3824 	ring = &adev->gfx.kiq.ring;
3825 
3826 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3827 	if (unlikely(r != 0))
3828 		return r;
3829 
3830 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3831 	if (unlikely(r != 0))
3832 		return r;
3833 
3834 	gfx_v9_0_kiq_init_queue(ring);
3835 	amdgpu_bo_kunmap(ring->mqd_obj);
3836 	ring->mqd_ptr = NULL;
3837 	amdgpu_bo_unreserve(ring->mqd_obj);
3838 	ring->sched.ready = true;
3839 	return 0;
3840 }
3841 
3842 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3843 {
3844 	struct amdgpu_ring *ring = NULL;
3845 	int r = 0, i;
3846 
3847 	gfx_v9_0_cp_compute_enable(adev, true);
3848 
3849 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3850 		ring = &adev->gfx.compute_ring[i];
3851 
3852 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3853 		if (unlikely(r != 0))
3854 			goto done;
3855 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3856 		if (!r) {
3857 			r = gfx_v9_0_kcq_init_queue(ring);
3858 			amdgpu_bo_kunmap(ring->mqd_obj);
3859 			ring->mqd_ptr = NULL;
3860 		}
3861 		amdgpu_bo_unreserve(ring->mqd_obj);
3862 		if (r)
3863 			goto done;
3864 	}
3865 
3866 	r = amdgpu_gfx_enable_kcq(adev);
3867 done:
3868 	return r;
3869 }
3870 
3871 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3872 {
3873 	int r, i;
3874 	struct amdgpu_ring *ring;
3875 
3876 	if (!(adev->flags & AMD_IS_APU))
3877 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3878 
3879 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3880 		if (adev->gfx.num_gfx_rings) {
3881 			/* legacy firmware loading */
3882 			r = gfx_v9_0_cp_gfx_load_microcode(adev);
3883 			if (r)
3884 				return r;
3885 		}
3886 
3887 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3888 		if (r)
3889 			return r;
3890 	}
3891 
3892 	r = gfx_v9_0_kiq_resume(adev);
3893 	if (r)
3894 		return r;
3895 
3896 	if (adev->gfx.num_gfx_rings) {
3897 		r = gfx_v9_0_cp_gfx_resume(adev);
3898 		if (r)
3899 			return r;
3900 	}
3901 
3902 	r = gfx_v9_0_kcq_resume(adev);
3903 	if (r)
3904 		return r;
3905 
3906 	if (adev->gfx.num_gfx_rings) {
3907 		ring = &adev->gfx.gfx_ring[0];
3908 		r = amdgpu_ring_test_helper(ring);
3909 		if (r)
3910 			return r;
3911 	}
3912 
3913 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3914 		ring = &adev->gfx.compute_ring[i];
3915 		amdgpu_ring_test_helper(ring);
3916 	}
3917 
3918 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3919 
3920 	return 0;
3921 }
3922 
3923 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3924 {
3925 	u32 tmp;
3926 
3927 	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&
3928 	    adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))
3929 		return;
3930 
3931 	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3932 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3933 				adev->df.hash_status.hash_64k);
3934 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3935 				adev->df.hash_status.hash_2m);
3936 	tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3937 				adev->df.hash_status.hash_1g);
3938 	WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3939 }
3940 
3941 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3942 {
3943 	if (adev->gfx.num_gfx_rings)
3944 		gfx_v9_0_cp_gfx_enable(adev, enable);
3945 	gfx_v9_0_cp_compute_enable(adev, enable);
3946 }
3947 
3948 static int gfx_v9_0_hw_init(void *handle)
3949 {
3950 	int r;
3951 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3952 
3953 	if (!amdgpu_sriov_vf(adev))
3954 		gfx_v9_0_init_golden_registers(adev);
3955 
3956 	gfx_v9_0_constants_init(adev);
3957 
3958 	gfx_v9_0_init_tcp_config(adev);
3959 
3960 	r = adev->gfx.rlc.funcs->resume(adev);
3961 	if (r)
3962 		return r;
3963 
3964 	r = gfx_v9_0_cp_resume(adev);
3965 	if (r)
3966 		return r;
3967 
3968 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
3969 		gfx_v9_4_2_set_power_brake_sequence(adev);
3970 
3971 	return r;
3972 }
3973 
3974 static int gfx_v9_0_hw_fini(void *handle)
3975 {
3976 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3977 
3978 	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3979 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3980 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3981 
3982 	/* DF freeze and kcq disable will fail */
3983 	if (!amdgpu_ras_intr_triggered())
3984 		/* disable KCQ to avoid CPC touch memory not valid anymore */
3985 		amdgpu_gfx_disable_kcq(adev);
3986 
3987 	if (amdgpu_sriov_vf(adev)) {
3988 		gfx_v9_0_cp_gfx_enable(adev, false);
3989 		/* must disable polling for SRIOV when hw finished, otherwise
3990 		 * CPC engine may still keep fetching WB address which is already
3991 		 * invalid after sw finished and trigger DMAR reading error in
3992 		 * hypervisor side.
3993 		 */
3994 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3995 		return 0;
3996 	}
3997 
3998 	/* Use deinitialize sequence from CAIL when unbinding device from driver,
3999 	 * otherwise KIQ is hanging when binding back
4000 	 */
4001 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4002 		mutex_lock(&adev->srbm_mutex);
4003 		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
4004 				adev->gfx.kiq.ring.pipe,
4005 				adev->gfx.kiq.ring.queue, 0);
4006 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
4007 		soc15_grbm_select(adev, 0, 0, 0, 0);
4008 		mutex_unlock(&adev->srbm_mutex);
4009 	}
4010 
4011 	gfx_v9_0_cp_enable(adev, false);
4012 
4013 	/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
4014 	if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
4015 	    (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
4016 		dev_dbg(adev->dev, "Skipping RLC halt\n");
4017 		return 0;
4018 	}
4019 
4020 	adev->gfx.rlc.funcs->stop(adev);
4021 	return 0;
4022 }
4023 
4024 static int gfx_v9_0_suspend(void *handle)
4025 {
4026 	return gfx_v9_0_hw_fini(handle);
4027 }
4028 
4029 static int gfx_v9_0_resume(void *handle)
4030 {
4031 	return gfx_v9_0_hw_init(handle);
4032 }
4033 
4034 static bool gfx_v9_0_is_idle(void *handle)
4035 {
4036 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4037 
4038 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
4039 				GRBM_STATUS, GUI_ACTIVE))
4040 		return false;
4041 	else
4042 		return true;
4043 }
4044 
4045 static int gfx_v9_0_wait_for_idle(void *handle)
4046 {
4047 	unsigned i;
4048 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4049 
4050 	for (i = 0; i < adev->usec_timeout; i++) {
4051 		if (gfx_v9_0_is_idle(handle))
4052 			return 0;
4053 		udelay(1);
4054 	}
4055 	return -ETIMEDOUT;
4056 }
4057 
4058 static int gfx_v9_0_soft_reset(void *handle)
4059 {
4060 	u32 grbm_soft_reset = 0;
4061 	u32 tmp;
4062 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4063 
4064 	/* GRBM_STATUS */
4065 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4066 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4067 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4068 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4069 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4070 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4071 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4072 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4073 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4074 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4075 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4076 	}
4077 
4078 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4079 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4080 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4081 	}
4082 
4083 	/* GRBM_STATUS2 */
4084 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4085 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4086 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4087 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4088 
4089 
4090 	if (grbm_soft_reset) {
4091 		/* stop the rlc */
4092 		adev->gfx.rlc.funcs->stop(adev);
4093 
4094 		if (adev->gfx.num_gfx_rings)
4095 			/* Disable GFX parsing/prefetching */
4096 			gfx_v9_0_cp_gfx_enable(adev, false);
4097 
4098 		/* Disable MEC parsing/prefetching */
4099 		gfx_v9_0_cp_compute_enable(adev, false);
4100 
4101 		if (grbm_soft_reset) {
4102 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4103 			tmp |= grbm_soft_reset;
4104 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4105 			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4106 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4107 
4108 			udelay(50);
4109 
4110 			tmp &= ~grbm_soft_reset;
4111 			WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4112 			tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4113 		}
4114 
4115 		/* Wait a little for things to settle down */
4116 		udelay(50);
4117 	}
4118 	return 0;
4119 }
4120 
4121 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4122 {
4123 	signed long r, cnt = 0;
4124 	unsigned long flags;
4125 	uint32_t seq, reg_val_offs = 0;
4126 	uint64_t value = 0;
4127 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4128 	struct amdgpu_ring *ring = &kiq->ring;
4129 
4130 	BUG_ON(!ring->funcs->emit_rreg);
4131 
4132 	spin_lock_irqsave(&kiq->ring_lock, flags);
4133 	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4134 		pr_err("critical bug! too many kiq readers\n");
4135 		goto failed_unlock;
4136 	}
4137 	amdgpu_ring_alloc(ring, 32);
4138 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4139 	amdgpu_ring_write(ring, 9 |	/* src: register*/
4140 				(5 << 8) |	/* dst: memory */
4141 				(1 << 16) |	/* count sel */
4142 				(1 << 20));	/* write confirm */
4143 	amdgpu_ring_write(ring, 0);
4144 	amdgpu_ring_write(ring, 0);
4145 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4146 				reg_val_offs * 4));
4147 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4148 				reg_val_offs * 4));
4149 	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4150 	if (r)
4151 		goto failed_undo;
4152 
4153 	amdgpu_ring_commit(ring);
4154 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4155 
4156 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4157 
4158 	/* don't wait anymore for gpu reset case because this way may
4159 	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
4160 	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
4161 	 * never return if we keep waiting in virt_kiq_rreg, which cause
4162 	 * gpu_recover() hang there.
4163 	 *
4164 	 * also don't wait anymore for IRQ context
4165 	 * */
4166 	if (r < 1 && (amdgpu_in_reset(adev)))
4167 		goto failed_kiq_read;
4168 
4169 	might_sleep();
4170 	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4171 		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4172 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4173 	}
4174 
4175 	if (cnt > MAX_KIQ_REG_TRY)
4176 		goto failed_kiq_read;
4177 
4178 	mb();
4179 	value = (uint64_t)adev->wb.wb[reg_val_offs] |
4180 		(uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
4181 	amdgpu_device_wb_free(adev, reg_val_offs);
4182 	return value;
4183 
4184 failed_undo:
4185 	amdgpu_ring_undo(ring);
4186 failed_unlock:
4187 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
4188 failed_kiq_read:
4189 	if (reg_val_offs)
4190 		amdgpu_device_wb_free(adev, reg_val_offs);
4191 	pr_err("failed to read gpu clock\n");
4192 	return ~0;
4193 }
4194 
4195 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4196 {
4197 	uint64_t clock, clock_lo, clock_hi, hi_check;
4198 
4199 	switch (adev->ip_versions[GC_HWIP][0]) {
4200 	case IP_VERSION(9, 3, 0):
4201 		preempt_disable();
4202 		clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4203 		clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4204 		hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4205 		/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
4206 		 * roughly every 42 seconds.
4207 		 */
4208 		if (hi_check != clock_hi) {
4209 			clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4210 			clock_hi = hi_check;
4211 		}
4212 		preempt_enable();
4213 		clock = clock_lo | (clock_hi << 32ULL);
4214 		break;
4215 	default:
4216 		amdgpu_gfx_off_ctrl(adev, false);
4217 		mutex_lock(&adev->gfx.gpu_clock_mutex);
4218 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
4219 			clock = gfx_v9_0_kiq_read_clock(adev);
4220 		} else {
4221 			WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4222 			clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4223 				((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4224 		}
4225 		mutex_unlock(&adev->gfx.gpu_clock_mutex);
4226 		amdgpu_gfx_off_ctrl(adev, true);
4227 		break;
4228 	}
4229 	return clock;
4230 }
4231 
4232 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4233 					  uint32_t vmid,
4234 					  uint32_t gds_base, uint32_t gds_size,
4235 					  uint32_t gws_base, uint32_t gws_size,
4236 					  uint32_t oa_base, uint32_t oa_size)
4237 {
4238 	struct amdgpu_device *adev = ring->adev;
4239 
4240 	/* GDS Base */
4241 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4242 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4243 				   gds_base);
4244 
4245 	/* GDS Size */
4246 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4247 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4248 				   gds_size);
4249 
4250 	/* GWS */
4251 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4252 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4253 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4254 
4255 	/* OA */
4256 	gfx_v9_0_write_data_to_reg(ring, 0, false,
4257 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4258 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
4259 }
4260 
4261 static const u32 vgpr_init_compute_shader[] =
4262 {
4263 	0xb07c0000, 0xbe8000ff,
4264 	0x000000f8, 0xbf110800,
4265 	0x7e000280, 0x7e020280,
4266 	0x7e040280, 0x7e060280,
4267 	0x7e080280, 0x7e0a0280,
4268 	0x7e0c0280, 0x7e0e0280,
4269 	0x80808800, 0xbe803200,
4270 	0xbf84fff5, 0xbf9c0000,
4271 	0xd28c0001, 0x0001007f,
4272 	0xd28d0001, 0x0002027e,
4273 	0x10020288, 0xb8810904,
4274 	0xb7814000, 0xd1196a01,
4275 	0x00000301, 0xbe800087,
4276 	0xbefc00c1, 0xd89c4000,
4277 	0x00020201, 0xd89cc080,
4278 	0x00040401, 0x320202ff,
4279 	0x00000800, 0x80808100,
4280 	0xbf84fff8, 0x7e020280,
4281 	0xbf810000, 0x00000000,
4282 };
4283 
4284 static const u32 sgpr_init_compute_shader[] =
4285 {
4286 	0xb07c0000, 0xbe8000ff,
4287 	0x0000005f, 0xbee50080,
4288 	0xbe812c65, 0xbe822c65,
4289 	0xbe832c65, 0xbe842c65,
4290 	0xbe852c65, 0xb77c0005,
4291 	0x80808500, 0xbf84fff8,
4292 	0xbe800080, 0xbf810000,
4293 };
4294 
4295 static const u32 vgpr_init_compute_shader_arcturus[] = {
4296 	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4297 	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4298 	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4299 	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4300 	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4301 	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4302 	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4303 	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4304 	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4305 	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4306 	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4307 	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4308 	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4309 	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4310 	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4311 	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4312 	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4313 	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4314 	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4315 	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4316 	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4317 	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4318 	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4319 	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4320 	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4321 	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4322 	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4323 	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4324 	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4325 	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4326 	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4327 	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4328 	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4329 	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4330 	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4331 	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4332 	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4333 	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4334 	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4335 	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4336 	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4337 	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4338 	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4339 	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4340 	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4341 	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4342 	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4343 	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4344 	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4345 	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4346 	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4347 	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4348 	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4349 	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4350 	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4351 	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4352 	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4353 	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4354 	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4355 	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4356 	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4357 	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4358 	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4359 	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4360 	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4361 	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4362 	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4363 	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4364 	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4365 	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4366 	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4367 	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4368 	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4369 	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4370 	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4371 	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4372 	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4373 	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4374 	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4375 	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4376 	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4377 	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4378 	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4379 	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4380 	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4381 	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4382 	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4383 	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4384 	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4385 	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4386 	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4387 	0xbf84fff8, 0xbf810000,
4388 };
4389 
4390 /* When below register arrays changed, please update gpr_reg_size,
4391   and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
4392   to cover all gfx9 ASICs */
4393 static const struct soc15_reg_entry vgpr_init_regs[] = {
4394    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4395    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4396    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4397    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4398    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4399    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4400    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4401    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4402    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4403    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4404    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4405    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4406    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4407    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4408 };
4409 
4410 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4411    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4412    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4413    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4414    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4415    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4416    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4417    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4418    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4419    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4420    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4421    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4422    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4423    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4424    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4425 };
4426 
4427 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4428    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4429    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4430    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4431    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4432    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4433    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4434    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4435    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4436    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4437    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4438    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4439    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4440    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4441    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4442 };
4443 
4444 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4445    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4446    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4447    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4448    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4449    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4450    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4451    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4452    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4453    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4454    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4455    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4456    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4457    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4458    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4459 };
4460 
4461 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4462    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4463    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4464    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4465    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4466    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4467    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4468    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4469    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4470    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4471    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4472    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4473    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4474    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4475    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4476    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4477    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4478    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4479    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4480    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4481    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4482    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4483    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4484    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4485    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4486    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4487    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4488    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4489    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4490    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4491    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4492    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4493    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4494    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4495 };
4496 
4497 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4498 {
4499 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4500 	int i, r;
4501 
4502 	/* only support when RAS is enabled */
4503 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4504 		return 0;
4505 
4506 	r = amdgpu_ring_alloc(ring, 7);
4507 	if (r) {
4508 		DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4509 			ring->name, r);
4510 		return r;
4511 	}
4512 
4513 	WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4514 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4515 
4516 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4517 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4518 				PACKET3_DMA_DATA_DST_SEL(1) |
4519 				PACKET3_DMA_DATA_SRC_SEL(2) |
4520 				PACKET3_DMA_DATA_ENGINE(0)));
4521 	amdgpu_ring_write(ring, 0);
4522 	amdgpu_ring_write(ring, 0);
4523 	amdgpu_ring_write(ring, 0);
4524 	amdgpu_ring_write(ring, 0);
4525 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4526 				adev->gds.gds_size);
4527 
4528 	amdgpu_ring_commit(ring);
4529 
4530 	for (i = 0; i < adev->usec_timeout; i++) {
4531 		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4532 			break;
4533 		udelay(1);
4534 	}
4535 
4536 	if (i >= adev->usec_timeout)
4537 		r = -ETIMEDOUT;
4538 
4539 	WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4540 
4541 	return r;
4542 }
4543 
4544 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4545 {
4546 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4547 	struct amdgpu_ib ib;
4548 	struct dma_fence *f = NULL;
4549 	int r, i;
4550 	unsigned total_size, vgpr_offset, sgpr_offset;
4551 	u64 gpu_addr;
4552 
4553 	int compute_dim_x = adev->gfx.config.max_shader_engines *
4554 						adev->gfx.config.max_cu_per_sh *
4555 						adev->gfx.config.max_sh_per_se;
4556 	int sgpr_work_group_size = 5;
4557 	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4558 	int vgpr_init_shader_size;
4559 	const u32 *vgpr_init_shader_ptr;
4560 	const struct soc15_reg_entry *vgpr_init_regs_ptr;
4561 
4562 	/* only support when RAS is enabled */
4563 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4564 		return 0;
4565 
4566 	/* bail if the compute ring is not ready */
4567 	if (!ring->sched.ready)
4568 		return 0;
4569 
4570 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
4571 		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4572 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4573 		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4574 	} else {
4575 		vgpr_init_shader_ptr = vgpr_init_compute_shader;
4576 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4577 		vgpr_init_regs_ptr = vgpr_init_regs;
4578 	}
4579 
4580 	total_size =
4581 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4582 	total_size +=
4583 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4584 	total_size +=
4585 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4586 	total_size = ALIGN(total_size, 256);
4587 	vgpr_offset = total_size;
4588 	total_size += ALIGN(vgpr_init_shader_size, 256);
4589 	sgpr_offset = total_size;
4590 	total_size += sizeof(sgpr_init_compute_shader);
4591 
4592 	/* allocate an indirect buffer to put the commands in */
4593 	memset(&ib, 0, sizeof(ib));
4594 	r = amdgpu_ib_get(adev, NULL, total_size,
4595 					AMDGPU_IB_POOL_DIRECT, &ib);
4596 	if (r) {
4597 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4598 		return r;
4599 	}
4600 
4601 	/* load the compute shaders */
4602 	for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4603 		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4604 
4605 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4606 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4607 
4608 	/* init the ib length to 0 */
4609 	ib.length_dw = 0;
4610 
4611 	/* VGPR */
4612 	/* write the register state for the compute dispatch */
4613 	for (i = 0; i < gpr_reg_size; i++) {
4614 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4615 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4616 								- PACKET3_SET_SH_REG_START;
4617 		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4618 	}
4619 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4620 	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4621 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4622 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4623 							- PACKET3_SET_SH_REG_START;
4624 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4625 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4626 
4627 	/* write dispatch packet */
4628 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4629 	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4630 	ib.ptr[ib.length_dw++] = 1; /* y */
4631 	ib.ptr[ib.length_dw++] = 1; /* z */
4632 	ib.ptr[ib.length_dw++] =
4633 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4634 
4635 	/* write CS partial flush packet */
4636 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4637 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4638 
4639 	/* SGPR1 */
4640 	/* write the register state for the compute dispatch */
4641 	for (i = 0; i < gpr_reg_size; i++) {
4642 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4643 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4644 								- PACKET3_SET_SH_REG_START;
4645 		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4646 	}
4647 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4648 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4649 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4650 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4651 							- PACKET3_SET_SH_REG_START;
4652 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4653 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4654 
4655 	/* write dispatch packet */
4656 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4657 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4658 	ib.ptr[ib.length_dw++] = 1; /* y */
4659 	ib.ptr[ib.length_dw++] = 1; /* z */
4660 	ib.ptr[ib.length_dw++] =
4661 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4662 
4663 	/* write CS partial flush packet */
4664 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4665 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4666 
4667 	/* SGPR2 */
4668 	/* write the register state for the compute dispatch */
4669 	for (i = 0; i < gpr_reg_size; i++) {
4670 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4671 		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4672 								- PACKET3_SET_SH_REG_START;
4673 		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4674 	}
4675 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4676 	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4677 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4678 	ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4679 							- PACKET3_SET_SH_REG_START;
4680 	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4681 	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4682 
4683 	/* write dispatch packet */
4684 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4685 	ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4686 	ib.ptr[ib.length_dw++] = 1; /* y */
4687 	ib.ptr[ib.length_dw++] = 1; /* z */
4688 	ib.ptr[ib.length_dw++] =
4689 		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4690 
4691 	/* write CS partial flush packet */
4692 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4693 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4694 
4695 	/* shedule the ib on the ring */
4696 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4697 	if (r) {
4698 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4699 		goto fail;
4700 	}
4701 
4702 	/* wait for the GPU to finish processing the IB */
4703 	r = dma_fence_wait(f, false);
4704 	if (r) {
4705 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4706 		goto fail;
4707 	}
4708 
4709 fail:
4710 	amdgpu_ib_free(adev, &ib, NULL);
4711 	dma_fence_put(f);
4712 
4713 	return r;
4714 }
4715 
4716 static int gfx_v9_0_early_init(void *handle)
4717 {
4718 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4719 
4720 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
4721 	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
4722 		adev->gfx.num_gfx_rings = 0;
4723 	else
4724 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4725 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4726 					  AMDGPU_MAX_COMPUTE_RINGS);
4727 	gfx_v9_0_set_kiq_pm4_funcs(adev);
4728 	gfx_v9_0_set_ring_funcs(adev);
4729 	gfx_v9_0_set_irq_funcs(adev);
4730 	gfx_v9_0_set_gds_init(adev);
4731 	gfx_v9_0_set_rlc_funcs(adev);
4732 
4733 	/* init rlcg reg access ctrl */
4734 	gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
4735 
4736 	return 0;
4737 }
4738 
4739 static int gfx_v9_0_ecc_late_init(void *handle)
4740 {
4741 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4742 	int r;
4743 
4744 	/*
4745 	 * Temp workaround to fix the issue that CP firmware fails to
4746 	 * update read pointer when CPDMA is writing clearing operation
4747 	 * to GDS in suspend/resume sequence on several cards. So just
4748 	 * limit this operation in cold boot sequence.
4749 	 */
4750 	if ((!adev->in_suspend) &&
4751 	    (adev->gds.gds_size)) {
4752 		r = gfx_v9_0_do_edc_gds_workarounds(adev);
4753 		if (r)
4754 			return r;
4755 	}
4756 
4757 	/* requires IBs so do in late init after IB pool is initialized */
4758 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
4759 		r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
4760 	else
4761 		r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4762 
4763 	if (r)
4764 		return r;
4765 
4766 	if (adev->gfx.ras &&
4767 	    adev->gfx.ras->enable_watchdog_timer)
4768 		adev->gfx.ras->enable_watchdog_timer(adev);
4769 
4770 	return 0;
4771 }
4772 
4773 static int gfx_v9_0_late_init(void *handle)
4774 {
4775 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4776 	int r;
4777 
4778 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4779 	if (r)
4780 		return r;
4781 
4782 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4783 	if (r)
4784 		return r;
4785 
4786 	r = gfx_v9_0_ecc_late_init(handle);
4787 	if (r)
4788 		return r;
4789 
4790 	return 0;
4791 }
4792 
4793 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4794 {
4795 	uint32_t rlc_setting;
4796 
4797 	/* if RLC is not enabled, do nothing */
4798 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4799 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4800 		return false;
4801 
4802 	return true;
4803 }
4804 
4805 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4806 {
4807 	uint32_t data;
4808 	unsigned i;
4809 
4810 	data = RLC_SAFE_MODE__CMD_MASK;
4811 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4812 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4813 
4814 	/* wait for RLC_SAFE_MODE */
4815 	for (i = 0; i < adev->usec_timeout; i++) {
4816 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4817 			break;
4818 		udelay(1);
4819 	}
4820 }
4821 
4822 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4823 {
4824 	uint32_t data;
4825 
4826 	data = RLC_SAFE_MODE__CMD_MASK;
4827 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4828 }
4829 
4830 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4831 						bool enable)
4832 {
4833 	amdgpu_gfx_rlc_enter_safe_mode(adev);
4834 
4835 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4836 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4837 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4838 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4839 	} else {
4840 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4841 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4842 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4843 	}
4844 
4845 	amdgpu_gfx_rlc_exit_safe_mode(adev);
4846 }
4847 
4848 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4849 						bool enable)
4850 {
4851 	/* TODO: double check if we need to perform under safe mode */
4852 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
4853 
4854 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4855 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4856 	else
4857 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4858 
4859 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4860 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4861 	else
4862 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4863 
4864 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
4865 }
4866 
4867 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4868 						      bool enable)
4869 {
4870 	uint32_t data, def;
4871 
4872 	amdgpu_gfx_rlc_enter_safe_mode(adev);
4873 
4874 	/* It is disabled by HW by default */
4875 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4876 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4877 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4878 
4879 		if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
4880 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4881 
4882 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4883 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4884 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4885 
4886 		/* only for Vega10 & Raven1 */
4887 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4888 
4889 		if (def != data)
4890 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4891 
4892 		/* MGLS is a global flag to control all MGLS in GFX */
4893 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4894 			/* 2 - RLC memory Light sleep */
4895 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4896 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4897 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4898 				if (def != data)
4899 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4900 			}
4901 			/* 3 - CP memory Light sleep */
4902 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4903 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4904 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4905 				if (def != data)
4906 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4907 			}
4908 		}
4909 	} else {
4910 		/* 1 - MGCG_OVERRIDE */
4911 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4912 
4913 		if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
4914 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4915 
4916 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4917 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4918 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4919 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4920 
4921 		if (def != data)
4922 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4923 
4924 		/* 2 - disable MGLS in RLC */
4925 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4926 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4927 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4928 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4929 		}
4930 
4931 		/* 3 - disable MGLS in CP */
4932 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4933 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4934 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4935 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4936 		}
4937 	}
4938 
4939 	amdgpu_gfx_rlc_exit_safe_mode(adev);
4940 }
4941 
4942 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4943 					   bool enable)
4944 {
4945 	uint32_t data, def;
4946 
4947 	if (!adev->gfx.num_gfx_rings)
4948 		return;
4949 
4950 	amdgpu_gfx_rlc_enter_safe_mode(adev);
4951 
4952 	/* Enable 3D CGCG/CGLS */
4953 	if (enable) {
4954 		/* write cmd to clear cgcg/cgls ov */
4955 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4956 		/* unset CGCG override */
4957 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4958 		/* update CGCG and CGLS override bits */
4959 		if (def != data)
4960 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4961 
4962 		/* enable 3Dcgcg FSM(0x0000363f) */
4963 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4964 
4965 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4966 			data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4967 				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4968 		else
4969 			data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
4970 
4971 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4972 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4973 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4974 		if (def != data)
4975 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4976 
4977 		/* set IDLE_POLL_COUNT(0x00900100) */
4978 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4979 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4980 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4981 		if (def != data)
4982 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4983 	} else {
4984 		/* Disable CGCG/CGLS */
4985 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4986 		/* disable cgcg, cgls should be disabled */
4987 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4988 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4989 		/* disable cgcg and cgls in FSM */
4990 		if (def != data)
4991 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4992 	}
4993 
4994 	amdgpu_gfx_rlc_exit_safe_mode(adev);
4995 }
4996 
4997 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4998 						      bool enable)
4999 {
5000 	uint32_t def, data;
5001 
5002 	amdgpu_gfx_rlc_enter_safe_mode(adev);
5003 
5004 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
5005 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5006 		/* unset CGCG override */
5007 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5008 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5009 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5010 		else
5011 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5012 		/* update CGCG and CGLS override bits */
5013 		if (def != data)
5014 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
5015 
5016 		/* enable cgcg FSM(0x0000363F) */
5017 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5018 
5019 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
5020 			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5021 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5022 		else
5023 			data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
5024 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5025 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5026 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
5027 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
5028 		if (def != data)
5029 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5030 
5031 		/* set IDLE_POLL_COUNT(0x00900100) */
5032 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
5033 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5034 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
5035 		if (def != data)
5036 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
5037 	} else {
5038 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5039 		/* reset CGCG/CGLS bits */
5040 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
5041 		/* disable cgcg and cgls in FSM */
5042 		if (def != data)
5043 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
5044 	}
5045 
5046 	amdgpu_gfx_rlc_exit_safe_mode(adev);
5047 }
5048 
5049 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5050 					    bool enable)
5051 {
5052 	if (enable) {
5053 		/* CGCG/CGLS should be enabled after MGCG/MGLS
5054 		 * ===  MGCG + MGLS ===
5055 		 */
5056 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5057 		/* ===  CGCG /CGLS for GFX 3D Only === */
5058 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5059 		/* ===  CGCG + CGLS === */
5060 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5061 	} else {
5062 		/* CGCG/CGLS should be disabled before MGCG/MGLS
5063 		 * ===  CGCG + CGLS ===
5064 		 */
5065 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
5066 		/* ===  CGCG /CGLS for GFX 3D Only === */
5067 		gfx_v9_0_update_3d_clock_gating(adev, enable);
5068 		/* ===  MGCG + MGLS === */
5069 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
5070 	}
5071 	return 0;
5072 }
5073 
5074 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5075 {
5076 	u32 reg, data;
5077 
5078 	amdgpu_gfx_off_ctrl(adev, false);
5079 
5080 	reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5081 	if (amdgpu_sriov_is_pp_one_vf(adev))
5082 		data = RREG32_NO_KIQ(reg);
5083 	else
5084 		data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
5085 
5086 	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5087 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5088 
5089 	if (amdgpu_sriov_is_pp_one_vf(adev))
5090 		WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5091 	else
5092 		WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5093 
5094 	amdgpu_gfx_off_ctrl(adev, true);
5095 }
5096 
5097 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5098 					uint32_t offset,
5099 					struct soc15_reg_rlcg *entries, int arr_size)
5100 {
5101 	int i;
5102 	uint32_t reg;
5103 
5104 	if (!entries)
5105 		return false;
5106 
5107 	for (i = 0; i < arr_size; i++) {
5108 		const struct soc15_reg_rlcg *entry;
5109 
5110 		entry = &entries[i];
5111 		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5112 		if (offset == reg)
5113 			return true;
5114 	}
5115 
5116 	return false;
5117 }
5118 
5119 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5120 {
5121 	return gfx_v9_0_check_rlcg_range(adev, offset,
5122 					(void *)rlcg_access_gc_9_0,
5123 					ARRAY_SIZE(rlcg_access_gc_9_0));
5124 }
5125 
5126 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5127 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5128 	.set_safe_mode = gfx_v9_0_set_safe_mode,
5129 	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
5130 	.init = gfx_v9_0_rlc_init,
5131 	.get_csb_size = gfx_v9_0_get_csb_size,
5132 	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
5133 	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5134 	.resume = gfx_v9_0_rlc_resume,
5135 	.stop = gfx_v9_0_rlc_stop,
5136 	.reset = gfx_v9_0_rlc_reset,
5137 	.start = gfx_v9_0_rlc_start,
5138 	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
5139 	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5140 };
5141 
5142 static int gfx_v9_0_set_powergating_state(void *handle,
5143 					  enum amd_powergating_state state)
5144 {
5145 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5146 	bool enable = (state == AMD_PG_STATE_GATE);
5147 
5148 	switch (adev->ip_versions[GC_HWIP][0]) {
5149 	case IP_VERSION(9, 2, 2):
5150 	case IP_VERSION(9, 1, 0):
5151 	case IP_VERSION(9, 3, 0):
5152 		if (!enable)
5153 			amdgpu_gfx_off_ctrl(adev, false);
5154 
5155 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5156 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5157 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5158 		} else {
5159 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5160 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5161 		}
5162 
5163 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5164 			gfx_v9_0_enable_cp_power_gating(adev, true);
5165 		else
5166 			gfx_v9_0_enable_cp_power_gating(adev, false);
5167 
5168 		/* update gfx cgpg state */
5169 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5170 
5171 		/* update mgcg state */
5172 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5173 
5174 		if (enable)
5175 			amdgpu_gfx_off_ctrl(adev, true);
5176 		break;
5177 	case IP_VERSION(9, 2, 1):
5178 		amdgpu_gfx_off_ctrl(adev, enable);
5179 		break;
5180 	default:
5181 		break;
5182 	}
5183 
5184 	return 0;
5185 }
5186 
5187 static int gfx_v9_0_set_clockgating_state(void *handle,
5188 					  enum amd_clockgating_state state)
5189 {
5190 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5191 
5192 	if (amdgpu_sriov_vf(adev))
5193 		return 0;
5194 
5195 	switch (adev->ip_versions[GC_HWIP][0]) {
5196 	case IP_VERSION(9, 0, 1):
5197 	case IP_VERSION(9, 2, 1):
5198 	case IP_VERSION(9, 4, 0):
5199 	case IP_VERSION(9, 2, 2):
5200 	case IP_VERSION(9, 1, 0):
5201 	case IP_VERSION(9, 4, 1):
5202 	case IP_VERSION(9, 3, 0):
5203 	case IP_VERSION(9, 4, 2):
5204 		gfx_v9_0_update_gfx_clock_gating(adev,
5205 						 state == AMD_CG_STATE_GATE);
5206 		break;
5207 	default:
5208 		break;
5209 	}
5210 	return 0;
5211 }
5212 
5213 static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)
5214 {
5215 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5216 	int data;
5217 
5218 	if (amdgpu_sriov_vf(adev))
5219 		*flags = 0;
5220 
5221 	/* AMD_CG_SUPPORT_GFX_MGCG */
5222 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5223 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5224 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
5225 
5226 	/* AMD_CG_SUPPORT_GFX_CGCG */
5227 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5228 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5229 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
5230 
5231 	/* AMD_CG_SUPPORT_GFX_CGLS */
5232 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5233 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
5234 
5235 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
5236 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5237 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5238 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5239 
5240 	/* AMD_CG_SUPPORT_GFX_CP_LS */
5241 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5242 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5243 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5244 
5245 	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {
5246 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
5247 		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5248 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5249 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5250 
5251 		/* AMD_CG_SUPPORT_GFX_3D_CGLS */
5252 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5253 			*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5254 	}
5255 }
5256 
5257 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5258 {
5259 	return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr*/
5260 }
5261 
5262 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5263 {
5264 	struct amdgpu_device *adev = ring->adev;
5265 	u64 wptr;
5266 
5267 	/* XXX check if swapping is necessary on BE */
5268 	if (ring->use_doorbell) {
5269 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5270 	} else {
5271 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5272 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5273 	}
5274 
5275 	return wptr;
5276 }
5277 
5278 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5279 {
5280 	struct amdgpu_device *adev = ring->adev;
5281 
5282 	if (ring->use_doorbell) {
5283 		/* XXX check if swapping is necessary on BE */
5284 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5285 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5286 	} else {
5287 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5288 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5289 	}
5290 }
5291 
5292 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5293 {
5294 	struct amdgpu_device *adev = ring->adev;
5295 	u32 ref_and_mask, reg_mem_engine;
5296 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5297 
5298 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5299 		switch (ring->me) {
5300 		case 1:
5301 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5302 			break;
5303 		case 2:
5304 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5305 			break;
5306 		default:
5307 			return;
5308 		}
5309 		reg_mem_engine = 0;
5310 	} else {
5311 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5312 		reg_mem_engine = 1; /* pfp */
5313 	}
5314 
5315 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5316 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5317 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5318 			      ref_and_mask, ref_and_mask, 0x20);
5319 }
5320 
5321 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5322 					struct amdgpu_job *job,
5323 					struct amdgpu_ib *ib,
5324 					uint32_t flags)
5325 {
5326 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5327 	u32 header, control = 0;
5328 
5329 	if (ib->flags & AMDGPU_IB_FLAG_CE)
5330 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5331 	else
5332 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5333 
5334 	control |= ib->length_dw | (vmid << 24);
5335 
5336 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5337 		control |= INDIRECT_BUFFER_PRE_ENB(1);
5338 
5339 		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5340 			gfx_v9_0_ring_emit_de_meta(ring);
5341 	}
5342 
5343 	amdgpu_ring_write(ring, header);
5344 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5345 	amdgpu_ring_write(ring,
5346 #ifdef __BIG_ENDIAN
5347 		(2 << 0) |
5348 #endif
5349 		lower_32_bits(ib->gpu_addr));
5350 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5351 	amdgpu_ring_write(ring, control);
5352 }
5353 
5354 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5355 					  struct amdgpu_job *job,
5356 					  struct amdgpu_ib *ib,
5357 					  uint32_t flags)
5358 {
5359 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5360 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5361 
5362 	/* Currently, there is a high possibility to get wave ID mismatch
5363 	 * between ME and GDS, leading to a hw deadlock, because ME generates
5364 	 * different wave IDs than the GDS expects. This situation happens
5365 	 * randomly when at least 5 compute pipes use GDS ordered append.
5366 	 * The wave IDs generated by ME are also wrong after suspend/resume.
5367 	 * Those are probably bugs somewhere else in the kernel driver.
5368 	 *
5369 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5370 	 * GDS to 0 for this ring (me/pipe).
5371 	 */
5372 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5373 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5374 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5375 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5376 	}
5377 
5378 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5379 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5380 	amdgpu_ring_write(ring,
5381 #ifdef __BIG_ENDIAN
5382 				(2 << 0) |
5383 #endif
5384 				lower_32_bits(ib->gpu_addr));
5385 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5386 	amdgpu_ring_write(ring, control);
5387 }
5388 
5389 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5390 				     u64 seq, unsigned flags)
5391 {
5392 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5393 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5394 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5395 
5396 	/* RELEASE_MEM - flush caches, send int */
5397 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5398 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5399 					       EOP_TC_NC_ACTION_EN) :
5400 					      (EOP_TCL1_ACTION_EN |
5401 					       EOP_TC_ACTION_EN |
5402 					       EOP_TC_WB_ACTION_EN |
5403 					       EOP_TC_MD_ACTION_EN)) |
5404 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5405 				 EVENT_INDEX(5)));
5406 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5407 
5408 	/*
5409 	 * the address should be Qword aligned if 64bit write, Dword
5410 	 * aligned if only send 32bit data low (discard data high)
5411 	 */
5412 	if (write64bit)
5413 		BUG_ON(addr & 0x7);
5414 	else
5415 		BUG_ON(addr & 0x3);
5416 	amdgpu_ring_write(ring, lower_32_bits(addr));
5417 	amdgpu_ring_write(ring, upper_32_bits(addr));
5418 	amdgpu_ring_write(ring, lower_32_bits(seq));
5419 	amdgpu_ring_write(ring, upper_32_bits(seq));
5420 	amdgpu_ring_write(ring, 0);
5421 }
5422 
5423 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5424 {
5425 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5426 	uint32_t seq = ring->fence_drv.sync_seq;
5427 	uint64_t addr = ring->fence_drv.gpu_addr;
5428 
5429 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5430 			      lower_32_bits(addr), upper_32_bits(addr),
5431 			      seq, 0xffffffff, 4);
5432 }
5433 
5434 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5435 					unsigned vmid, uint64_t pd_addr)
5436 {
5437 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5438 
5439 	/* compute doesn't have PFP */
5440 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5441 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
5442 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5443 		amdgpu_ring_write(ring, 0x0);
5444 	}
5445 }
5446 
5447 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5448 {
5449 	return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
5450 }
5451 
5452 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5453 {
5454 	u64 wptr;
5455 
5456 	/* XXX check if swapping is necessary on BE */
5457 	if (ring->use_doorbell)
5458 		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5459 	else
5460 		BUG();
5461 	return wptr;
5462 }
5463 
5464 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5465 {
5466 	struct amdgpu_device *adev = ring->adev;
5467 
5468 	/* XXX check if swapping is necessary on BE */
5469 	if (ring->use_doorbell) {
5470 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
5471 		WDOORBELL64(ring->doorbell_index, ring->wptr);
5472 	} else{
5473 		BUG(); /* only DOORBELL method supported on gfx9 now */
5474 	}
5475 }
5476 
5477 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5478 					 u64 seq, unsigned int flags)
5479 {
5480 	struct amdgpu_device *adev = ring->adev;
5481 
5482 	/* we only allocate 32bit for each seq wb address */
5483 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5484 
5485 	/* write fence seq to the "addr" */
5486 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5487 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5488 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5489 	amdgpu_ring_write(ring, lower_32_bits(addr));
5490 	amdgpu_ring_write(ring, upper_32_bits(addr));
5491 	amdgpu_ring_write(ring, lower_32_bits(seq));
5492 
5493 	if (flags & AMDGPU_FENCE_FLAG_INT) {
5494 		/* set register to trigger INT */
5495 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5496 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5497 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5498 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5499 		amdgpu_ring_write(ring, 0);
5500 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5501 	}
5502 }
5503 
5504 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5505 {
5506 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5507 	amdgpu_ring_write(ring, 0);
5508 }
5509 
5510 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5511 {
5512 	struct v9_ce_ib_state ce_payload = {0};
5513 	uint64_t csa_addr;
5514 	int cnt;
5515 
5516 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5517 	csa_addr = amdgpu_csa_vaddr(ring->adev);
5518 
5519 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5520 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5521 				 WRITE_DATA_DST_SEL(8) |
5522 				 WR_CONFIRM) |
5523 				 WRITE_DATA_CACHE_POLICY(0));
5524 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5525 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5526 	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5527 }
5528 
5529 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5530 {
5531 	struct v9_de_ib_state de_payload = {0};
5532 	uint64_t csa_addr, gds_addr;
5533 	int cnt;
5534 
5535 	csa_addr = amdgpu_csa_vaddr(ring->adev);
5536 	gds_addr = csa_addr + 4096;
5537 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5538 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5539 
5540 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5541 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5542 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5543 				 WRITE_DATA_DST_SEL(8) |
5544 				 WR_CONFIRM) |
5545 				 WRITE_DATA_CACHE_POLICY(0));
5546 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5547 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5548 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5549 }
5550 
5551 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5552 				   bool secure)
5553 {
5554 	uint32_t v = secure ? FRAME_TMZ : 0;
5555 
5556 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5557 	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5558 }
5559 
5560 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5561 {
5562 	uint32_t dw2 = 0;
5563 
5564 	if (amdgpu_sriov_vf(ring->adev))
5565 		gfx_v9_0_ring_emit_ce_meta(ring);
5566 
5567 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
5568 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5569 		/* set load_global_config & load_global_uconfig */
5570 		dw2 |= 0x8001;
5571 		/* set load_cs_sh_regs */
5572 		dw2 |= 0x01000000;
5573 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
5574 		dw2 |= 0x10002;
5575 
5576 		/* set load_ce_ram if preamble presented */
5577 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5578 			dw2 |= 0x10000000;
5579 	} else {
5580 		/* still load_ce_ram if this is the first time preamble presented
5581 		 * although there is no context switch happens.
5582 		 */
5583 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5584 			dw2 |= 0x10000000;
5585 	}
5586 
5587 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5588 	amdgpu_ring_write(ring, dw2);
5589 	amdgpu_ring_write(ring, 0);
5590 }
5591 
5592 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5593 {
5594 	unsigned ret;
5595 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5596 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5597 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5598 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5599 	ret = ring->wptr & ring->buf_mask;
5600 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5601 	return ret;
5602 }
5603 
5604 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5605 {
5606 	unsigned cur;
5607 	BUG_ON(offset > ring->buf_mask);
5608 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
5609 
5610 	cur = (ring->wptr & ring->buf_mask) - 1;
5611 	if (likely(cur > offset))
5612 		ring->ring[offset] = cur - offset;
5613 	else
5614 		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5615 }
5616 
5617 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5618 				    uint32_t reg_val_offs)
5619 {
5620 	struct amdgpu_device *adev = ring->adev;
5621 
5622 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5623 	amdgpu_ring_write(ring, 0 |	/* src: register*/
5624 				(5 << 8) |	/* dst: memory */
5625 				(1 << 20));	/* write confirm */
5626 	amdgpu_ring_write(ring, reg);
5627 	amdgpu_ring_write(ring, 0);
5628 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5629 				reg_val_offs * 4));
5630 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5631 				reg_val_offs * 4));
5632 }
5633 
5634 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5635 				    uint32_t val)
5636 {
5637 	uint32_t cmd = 0;
5638 
5639 	switch (ring->funcs->type) {
5640 	case AMDGPU_RING_TYPE_GFX:
5641 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5642 		break;
5643 	case AMDGPU_RING_TYPE_KIQ:
5644 		cmd = (1 << 16); /* no inc addr */
5645 		break;
5646 	default:
5647 		cmd = WR_CONFIRM;
5648 		break;
5649 	}
5650 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5651 	amdgpu_ring_write(ring, cmd);
5652 	amdgpu_ring_write(ring, reg);
5653 	amdgpu_ring_write(ring, 0);
5654 	amdgpu_ring_write(ring, val);
5655 }
5656 
5657 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5658 					uint32_t val, uint32_t mask)
5659 {
5660 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5661 }
5662 
5663 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5664 						  uint32_t reg0, uint32_t reg1,
5665 						  uint32_t ref, uint32_t mask)
5666 {
5667 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5668 	struct amdgpu_device *adev = ring->adev;
5669 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5670 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5671 
5672 	if (fw_version_ok)
5673 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5674 				      ref, mask, 0x20);
5675 	else
5676 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5677 							   ref, mask);
5678 }
5679 
5680 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5681 {
5682 	struct amdgpu_device *adev = ring->adev;
5683 	uint32_t value = 0;
5684 
5685 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5686 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5687 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5688 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5689 	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5690 }
5691 
5692 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5693 						 enum amdgpu_interrupt_state state)
5694 {
5695 	switch (state) {
5696 	case AMDGPU_IRQ_STATE_DISABLE:
5697 	case AMDGPU_IRQ_STATE_ENABLE:
5698 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5699 			       TIME_STAMP_INT_ENABLE,
5700 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5701 		break;
5702 	default:
5703 		break;
5704 	}
5705 }
5706 
5707 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5708 						     int me, int pipe,
5709 						     enum amdgpu_interrupt_state state)
5710 {
5711 	u32 mec_int_cntl, mec_int_cntl_reg;
5712 
5713 	/*
5714 	 * amdgpu controls only the first MEC. That's why this function only
5715 	 * handles the setting of interrupts for this specific MEC. All other
5716 	 * pipes' interrupts are set by amdkfd.
5717 	 */
5718 
5719 	if (me == 1) {
5720 		switch (pipe) {
5721 		case 0:
5722 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5723 			break;
5724 		case 1:
5725 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5726 			break;
5727 		case 2:
5728 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5729 			break;
5730 		case 3:
5731 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5732 			break;
5733 		default:
5734 			DRM_DEBUG("invalid pipe %d\n", pipe);
5735 			return;
5736 		}
5737 	} else {
5738 		DRM_DEBUG("invalid me %d\n", me);
5739 		return;
5740 	}
5741 
5742 	switch (state) {
5743 	case AMDGPU_IRQ_STATE_DISABLE:
5744 		mec_int_cntl = RREG32_SOC15_IP(GC,mec_int_cntl_reg);
5745 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5746 					     TIME_STAMP_INT_ENABLE, 0);
5747 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5748 		break;
5749 	case AMDGPU_IRQ_STATE_ENABLE:
5750 		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5751 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5752 					     TIME_STAMP_INT_ENABLE, 1);
5753 		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5754 		break;
5755 	default:
5756 		break;
5757 	}
5758 }
5759 
5760 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5761 					     struct amdgpu_irq_src *source,
5762 					     unsigned type,
5763 					     enum amdgpu_interrupt_state state)
5764 {
5765 	switch (state) {
5766 	case AMDGPU_IRQ_STATE_DISABLE:
5767 	case AMDGPU_IRQ_STATE_ENABLE:
5768 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5769 			       PRIV_REG_INT_ENABLE,
5770 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5771 		break;
5772 	default:
5773 		break;
5774 	}
5775 
5776 	return 0;
5777 }
5778 
5779 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5780 					      struct amdgpu_irq_src *source,
5781 					      unsigned type,
5782 					      enum amdgpu_interrupt_state state)
5783 {
5784 	switch (state) {
5785 	case AMDGPU_IRQ_STATE_DISABLE:
5786 	case AMDGPU_IRQ_STATE_ENABLE:
5787 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5788 			       PRIV_INSTR_INT_ENABLE,
5789 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5790 		break;
5791 	default:
5792 		break;
5793 	}
5794 
5795 	return 0;
5796 }
5797 
5798 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
5799 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5800 			CP_ECC_ERROR_INT_ENABLE, 1)
5801 
5802 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
5803 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5804 			CP_ECC_ERROR_INT_ENABLE, 0)
5805 
5806 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5807 					      struct amdgpu_irq_src *source,
5808 					      unsigned type,
5809 					      enum amdgpu_interrupt_state state)
5810 {
5811 	switch (state) {
5812 	case AMDGPU_IRQ_STATE_DISABLE:
5813 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5814 				CP_ECC_ERROR_INT_ENABLE, 0);
5815 		DISABLE_ECC_ON_ME_PIPE(1, 0);
5816 		DISABLE_ECC_ON_ME_PIPE(1, 1);
5817 		DISABLE_ECC_ON_ME_PIPE(1, 2);
5818 		DISABLE_ECC_ON_ME_PIPE(1, 3);
5819 		break;
5820 
5821 	case AMDGPU_IRQ_STATE_ENABLE:
5822 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5823 				CP_ECC_ERROR_INT_ENABLE, 1);
5824 		ENABLE_ECC_ON_ME_PIPE(1, 0);
5825 		ENABLE_ECC_ON_ME_PIPE(1, 1);
5826 		ENABLE_ECC_ON_ME_PIPE(1, 2);
5827 		ENABLE_ECC_ON_ME_PIPE(1, 3);
5828 		break;
5829 	default:
5830 		break;
5831 	}
5832 
5833 	return 0;
5834 }
5835 
5836 
5837 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5838 					    struct amdgpu_irq_src *src,
5839 					    unsigned type,
5840 					    enum amdgpu_interrupt_state state)
5841 {
5842 	switch (type) {
5843 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5844 		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5845 		break;
5846 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5847 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5848 		break;
5849 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5850 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5851 		break;
5852 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5853 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5854 		break;
5855 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5856 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5857 		break;
5858 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5859 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5860 		break;
5861 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5862 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5863 		break;
5864 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5865 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5866 		break;
5867 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5868 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5869 		break;
5870 	default:
5871 		break;
5872 	}
5873 	return 0;
5874 }
5875 
5876 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5877 			    struct amdgpu_irq_src *source,
5878 			    struct amdgpu_iv_entry *entry)
5879 {
5880 	int i;
5881 	u8 me_id, pipe_id, queue_id;
5882 	struct amdgpu_ring *ring;
5883 
5884 	DRM_DEBUG("IH: CP EOP\n");
5885 	me_id = (entry->ring_id & 0x0c) >> 2;
5886 	pipe_id = (entry->ring_id & 0x03) >> 0;
5887 	queue_id = (entry->ring_id & 0x70) >> 4;
5888 
5889 	switch (me_id) {
5890 	case 0:
5891 		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5892 		break;
5893 	case 1:
5894 	case 2:
5895 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5896 			ring = &adev->gfx.compute_ring[i];
5897 			/* Per-queue interrupt is supported for MEC starting from VI.
5898 			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
5899 			  */
5900 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5901 				amdgpu_fence_process(ring);
5902 		}
5903 		break;
5904 	}
5905 	return 0;
5906 }
5907 
5908 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5909 			   struct amdgpu_iv_entry *entry)
5910 {
5911 	u8 me_id, pipe_id, queue_id;
5912 	struct amdgpu_ring *ring;
5913 	int i;
5914 
5915 	me_id = (entry->ring_id & 0x0c) >> 2;
5916 	pipe_id = (entry->ring_id & 0x03) >> 0;
5917 	queue_id = (entry->ring_id & 0x70) >> 4;
5918 
5919 	switch (me_id) {
5920 	case 0:
5921 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5922 		break;
5923 	case 1:
5924 	case 2:
5925 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5926 			ring = &adev->gfx.compute_ring[i];
5927 			if (ring->me == me_id && ring->pipe == pipe_id &&
5928 			    ring->queue == queue_id)
5929 				drm_sched_fault(&ring->sched);
5930 		}
5931 		break;
5932 	}
5933 }
5934 
5935 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5936 				 struct amdgpu_irq_src *source,
5937 				 struct amdgpu_iv_entry *entry)
5938 {
5939 	DRM_ERROR("Illegal register access in command stream\n");
5940 	gfx_v9_0_fault(adev, entry);
5941 	return 0;
5942 }
5943 
5944 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5945 				  struct amdgpu_irq_src *source,
5946 				  struct amdgpu_iv_entry *entry)
5947 {
5948 	DRM_ERROR("Illegal instruction in command stream\n");
5949 	gfx_v9_0_fault(adev, entry);
5950 	return 0;
5951 }
5952 
5953 
5954 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5955 	{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5956 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5957 	  SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5958 	},
5959 	{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5960 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5961 	  SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5962 	},
5963 	{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5964 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5965 	  0, 0
5966 	},
5967 	{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5968 	  SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5969 	  0, 0
5970 	},
5971 	{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5972 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5973 	  SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5974 	},
5975 	{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5976 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5977 	  0, 0
5978 	},
5979 	{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5980 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5981 	  SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5982 	},
5983 	{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5984 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5985 	  SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5986 	},
5987 	{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5988 	  SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5989 	  0, 0
5990 	},
5991 	{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5992 	  SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5993 	  0, 0
5994 	},
5995 	{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5996 	  SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5997 	  0, 0
5998 	},
5999 	{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6000 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
6001 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
6002 	},
6003 	{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
6004 	  SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
6005 	  0, 0
6006 	},
6007 	{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6008 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
6009 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
6010 	},
6011 	{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
6012 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6013 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
6014 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
6015 	},
6016 	{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
6017 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
6018 	  SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
6019 	  0, 0
6020 	},
6021 	{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
6022 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6023 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
6024 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
6025 	},
6026 	{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
6027 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6028 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
6029 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
6030 	},
6031 	{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
6032 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6033 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
6034 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
6035 	},
6036 	{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
6037 	  SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
6038 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
6039 	  SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
6040 	},
6041 	{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
6042 	  SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
6043 	  0, 0
6044 	},
6045 	{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6046 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
6047 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
6048 	},
6049 	{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6050 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6051 	  0, 0
6052 	},
6053 	{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6054 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6055 	  0, 0
6056 	},
6057 	{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6058 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6059 	  0, 0
6060 	},
6061 	{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6062 	  SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6063 	  0, 0
6064 	},
6065 	{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6066 	  SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6067 	  0, 0
6068 	},
6069 	{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6070 	  SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6071 	  0, 0
6072 	},
6073 	{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6074 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6075 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6076 	},
6077 	{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6078 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6079 	  SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6080 	},
6081 	{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6082 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6083 	  SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6084 	},
6085 	{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6086 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6087 	  SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6088 	},
6089 	{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6090 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6091 	  SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6092 	},
6093 	{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6094 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6095 	  0, 0
6096 	},
6097 	{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6098 	  SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6099 	  0, 0
6100 	},
6101 	{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6102 	  SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6103 	  0, 0
6104 	},
6105 	{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6106 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6107 	  0, 0
6108 	},
6109 	{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6110 	  SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6111 	  0, 0
6112 	},
6113 	{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6114 	  SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6115 	  0, 0
6116 	},
6117 	{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6118 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6119 	  0, 0
6120 	},
6121 	{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6122 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6123 	  0, 0
6124 	},
6125 	{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6126 	  SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6127 	  0, 0
6128 	},
6129 	{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6130 	  SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6131 	  0, 0
6132 	},
6133 	{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6134 	  SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6135 	  0, 0
6136 	},
6137 	{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6138 	  SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6139 	  0, 0
6140 	},
6141 	{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6142 	  SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6143 	  0, 0
6144 	},
6145 	{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6146 	  SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6147 	  0, 0
6148 	},
6149 	{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6150 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6151 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6152 	},
6153 	{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6154 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6155 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6156 	},
6157 	{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6158 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6159 	  0, 0
6160 	},
6161 	{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6162 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6163 	  0, 0
6164 	},
6165 	{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6166 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6167 	  0, 0
6168 	},
6169 	{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6170 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6171 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6172 	},
6173 	{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6174 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6175 	  SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6176 	},
6177 	{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6178 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6179 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6180 	},
6181 	{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6182 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6183 	  SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6184 	},
6185 	{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6186 	  SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6187 	  0, 0
6188 	},
6189 	{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6190 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6191 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6192 	},
6193 	{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6194 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6195 	  SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6196 	},
6197 	{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6198 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6199 	  SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6200 	},
6201 	{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6202 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6203 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6204 	},
6205 	{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6206 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6207 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6208 	},
6209 	{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6210 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6211 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6212 	},
6213 	{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6214 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6215 	  SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6216 	},
6217 	{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6218 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6219 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6220 	},
6221 	{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6222 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6223 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6224 	},
6225 	{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6226 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6227 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6228 	},
6229 	{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6230 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6231 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6232 	},
6233 	{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6234 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6235 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6236 	},
6237 	{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6238 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6239 	  SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6240 	},
6241 	{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6242 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6243 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6244 	},
6245 	{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6246 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6247 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6248 	},
6249 	{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6250 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6251 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6252 	},
6253 	{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6254 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6255 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6256 	},
6257 	{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6258 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6259 	  0, 0
6260 	},
6261 	{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6262 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6263 	  0, 0
6264 	},
6265 	{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6266 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6267 	  0, 0
6268 	},
6269 	{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6270 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6271 	  0, 0
6272 	},
6273 	{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6274 	  SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6275 	  0, 0
6276 	},
6277 	{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6278 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6279 	  SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6280 	},
6281 	{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6282 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6283 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6284 	},
6285 	{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6286 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6287 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6288 	},
6289 	{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6290 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6291 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6292 	},
6293 	{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6294 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6295 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6296 	},
6297 	{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6298 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6299 	  0, 0
6300 	},
6301 	{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6302 	  SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6303 	  0, 0
6304 	},
6305 	{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6306 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6307 	  0, 0
6308 	},
6309 	{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6310 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6311 	  0, 0
6312 	},
6313 	{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6314 	  SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6315 	  0, 0
6316 	},
6317 	{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6318 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6319 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6320 	},
6321 	{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6322 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6323 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6324 	},
6325 	{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6326 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6327 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6328 	},
6329 	{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6330 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6331 	  SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6332 	},
6333 	{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6334 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6335 	  SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6336 	},
6337 	{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6338 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6339 	  0, 0
6340 	},
6341 	{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6342 	  SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6343 	  0, 0
6344 	},
6345 	{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6346 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6347 	  0, 0
6348 	},
6349 	{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6350 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6351 	  0, 0
6352 	},
6353 	{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6354 	  SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6355 	  0, 0
6356 	},
6357 	{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6358 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6359 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6360 	},
6361 	{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6362 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6363 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6364 	},
6365 	{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6366 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6367 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6368 	},
6369 	{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6370 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6371 	  0, 0
6372 	},
6373 	{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6374 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6375 	  0, 0
6376 	},
6377 	{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6378 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6379 	  0, 0
6380 	},
6381 	{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6382 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6383 	  0, 0
6384 	},
6385 	{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6386 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6387 	  0, 0
6388 	},
6389 	{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6390 	  SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6391 	  0, 0
6392 	}
6393 };
6394 
6395 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6396 				     void *inject_if)
6397 {
6398 	struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6399 	int ret;
6400 	struct ta_ras_trigger_error_input block_info = { 0 };
6401 
6402 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6403 		return -EINVAL;
6404 
6405 	if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6406 		return -EINVAL;
6407 
6408 	if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6409 		return -EPERM;
6410 
6411 	if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6412 	      info->head.type)) {
6413 		DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
6414 			ras_gfx_subblocks[info->head.sub_block_index].name,
6415 			info->head.type);
6416 		return -EPERM;
6417 	}
6418 
6419 	if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6420 	      info->head.type)) {
6421 		DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
6422 			ras_gfx_subblocks[info->head.sub_block_index].name,
6423 			info->head.type);
6424 		return -EPERM;
6425 	}
6426 
6427 	block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6428 	block_info.sub_block_index =
6429 		ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6430 	block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6431 	block_info.address = info->address;
6432 	block_info.value = info->value;
6433 
6434 	mutex_lock(&adev->grbm_idx_mutex);
6435 	ret = psp_ras_trigger_error(&adev->psp, &block_info);
6436 	mutex_unlock(&adev->grbm_idx_mutex);
6437 
6438 	return ret;
6439 }
6440 
6441 static const char *vml2_mems[] = {
6442 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6443 	"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6444 	"UTC_VML2_BANK_CACHE_0_4K_MEM0",
6445 	"UTC_VML2_BANK_CACHE_0_4K_MEM1",
6446 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6447 	"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6448 	"UTC_VML2_BANK_CACHE_1_4K_MEM0",
6449 	"UTC_VML2_BANK_CACHE_1_4K_MEM1",
6450 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6451 	"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6452 	"UTC_VML2_BANK_CACHE_2_4K_MEM0",
6453 	"UTC_VML2_BANK_CACHE_2_4K_MEM1",
6454 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6455 	"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6456 	"UTC_VML2_BANK_CACHE_3_4K_MEM0",
6457 	"UTC_VML2_BANK_CACHE_3_4K_MEM1",
6458 };
6459 
6460 static const char *vml2_walker_mems[] = {
6461 	"UTC_VML2_CACHE_PDE0_MEM0",
6462 	"UTC_VML2_CACHE_PDE0_MEM1",
6463 	"UTC_VML2_CACHE_PDE1_MEM0",
6464 	"UTC_VML2_CACHE_PDE1_MEM1",
6465 	"UTC_VML2_CACHE_PDE2_MEM0",
6466 	"UTC_VML2_CACHE_PDE2_MEM1",
6467 	"UTC_VML2_RDIF_LOG_FIFO",
6468 };
6469 
6470 static const char *atc_l2_cache_2m_mems[] = {
6471 	"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6472 	"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6473 	"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6474 	"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6475 };
6476 
6477 static const char *atc_l2_cache_4k_mems[] = {
6478 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6479 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6480 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6481 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6482 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6483 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6484 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6485 	"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6486 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6487 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6488 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6489 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6490 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6491 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6492 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6493 	"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6494 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6495 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6496 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6497 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6498 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6499 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6500 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6501 	"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6502 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6503 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6504 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6505 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6506 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6507 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6508 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6509 	"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6510 };
6511 
6512 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6513 					 struct ras_err_data *err_data)
6514 {
6515 	uint32_t i, data;
6516 	uint32_t sec_count, ded_count;
6517 
6518 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6519 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6520 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6521 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6522 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6523 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6524 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6525 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6526 
6527 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6528 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6529 		data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6530 
6531 		sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6532 		if (sec_count) {
6533 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6534 				"SEC %d\n", i, vml2_mems[i], sec_count);
6535 			err_data->ce_count += sec_count;
6536 		}
6537 
6538 		ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6539 		if (ded_count) {
6540 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6541 				"DED %d\n", i, vml2_mems[i], ded_count);
6542 			err_data->ue_count += ded_count;
6543 		}
6544 	}
6545 
6546 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6547 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6548 		data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6549 
6550 		sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6551 						SEC_COUNT);
6552 		if (sec_count) {
6553 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6554 				"SEC %d\n", i, vml2_walker_mems[i], sec_count);
6555 			err_data->ce_count += sec_count;
6556 		}
6557 
6558 		ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6559 						DED_COUNT);
6560 		if (ded_count) {
6561 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6562 				"DED %d\n", i, vml2_walker_mems[i], ded_count);
6563 			err_data->ue_count += ded_count;
6564 		}
6565 	}
6566 
6567 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6568 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6569 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6570 
6571 		sec_count = (data & 0x00006000L) >> 0xd;
6572 		if (sec_count) {
6573 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6574 				"SEC %d\n", i, atc_l2_cache_2m_mems[i],
6575 				sec_count);
6576 			err_data->ce_count += sec_count;
6577 		}
6578 	}
6579 
6580 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6581 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6582 		data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6583 
6584 		sec_count = (data & 0x00006000L) >> 0xd;
6585 		if (sec_count) {
6586 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6587 				"SEC %d\n", i, atc_l2_cache_4k_mems[i],
6588 				sec_count);
6589 			err_data->ce_count += sec_count;
6590 		}
6591 
6592 		ded_count = (data & 0x00018000L) >> 0xf;
6593 		if (ded_count) {
6594 			dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6595 				"DED %d\n", i, atc_l2_cache_4k_mems[i],
6596 				ded_count);
6597 			err_data->ue_count += ded_count;
6598 		}
6599 	}
6600 
6601 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6602 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6603 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6604 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6605 
6606 	return 0;
6607 }
6608 
6609 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6610 	const struct soc15_reg_entry *reg,
6611 	uint32_t se_id, uint32_t inst_id, uint32_t value,
6612 	uint32_t *sec_count, uint32_t *ded_count)
6613 {
6614 	uint32_t i;
6615 	uint32_t sec_cnt, ded_cnt;
6616 
6617 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6618 		if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6619 			gfx_v9_0_ras_fields[i].seg != reg->seg ||
6620 			gfx_v9_0_ras_fields[i].inst != reg->inst)
6621 			continue;
6622 
6623 		sec_cnt = (value &
6624 				gfx_v9_0_ras_fields[i].sec_count_mask) >>
6625 				gfx_v9_0_ras_fields[i].sec_count_shift;
6626 		if (sec_cnt) {
6627 			dev_info(adev->dev, "GFX SubBlock %s, "
6628 				"Instance[%d][%d], SEC %d\n",
6629 				gfx_v9_0_ras_fields[i].name,
6630 				se_id, inst_id,
6631 				sec_cnt);
6632 			*sec_count += sec_cnt;
6633 		}
6634 
6635 		ded_cnt = (value &
6636 				gfx_v9_0_ras_fields[i].ded_count_mask) >>
6637 				gfx_v9_0_ras_fields[i].ded_count_shift;
6638 		if (ded_cnt) {
6639 			dev_info(adev->dev, "GFX SubBlock %s, "
6640 				"Instance[%d][%d], DED %d\n",
6641 				gfx_v9_0_ras_fields[i].name,
6642 				se_id, inst_id,
6643 				ded_cnt);
6644 			*ded_count += ded_cnt;
6645 		}
6646 	}
6647 
6648 	return 0;
6649 }
6650 
6651 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6652 {
6653 	int i, j, k;
6654 
6655 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6656 		return;
6657 
6658 	/* read back registers to clear the counters */
6659 	mutex_lock(&adev->grbm_idx_mutex);
6660 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6661 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6662 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6663 				gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6664 				RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6665 			}
6666 		}
6667 	}
6668 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6669 	mutex_unlock(&adev->grbm_idx_mutex);
6670 
6671 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6672 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6673 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6674 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6675 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6676 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6677 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6678 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6679 
6680 	for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6681 		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6682 		RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6683 	}
6684 
6685 	for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6686 		WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6687 		RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6688 	}
6689 
6690 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6691 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6692 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6693 	}
6694 
6695 	for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6696 		WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6697 		RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6698 	}
6699 
6700 	WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6701 	WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6702 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6703 	WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6704 }
6705 
6706 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6707 					  void *ras_error_status)
6708 {
6709 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6710 	uint32_t sec_count = 0, ded_count = 0;
6711 	uint32_t i, j, k;
6712 	uint32_t reg_value;
6713 
6714 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6715 		return;
6716 
6717 	err_data->ue_count = 0;
6718 	err_data->ce_count = 0;
6719 
6720 	mutex_lock(&adev->grbm_idx_mutex);
6721 
6722 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6723 		for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6724 			for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6725 				gfx_v9_0_select_se_sh(adev, j, 0, k);
6726 				reg_value =
6727 					RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6728 				if (reg_value)
6729 					gfx_v9_0_ras_error_count(adev,
6730 						&gfx_v9_0_edc_counter_regs[i],
6731 						j, k, reg_value,
6732 						&sec_count, &ded_count);
6733 			}
6734 		}
6735 	}
6736 
6737 	err_data->ce_count += sec_count;
6738 	err_data->ue_count += ded_count;
6739 
6740 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6741 	mutex_unlock(&adev->grbm_idx_mutex);
6742 
6743 	gfx_v9_0_query_utc_edc_status(adev, err_data);
6744 }
6745 
6746 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6747 {
6748 	const unsigned int cp_coher_cntl =
6749 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6750 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6751 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6752 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6753 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6754 
6755 	/* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
6756 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6757 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6758 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6759 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6760 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6761 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6762 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6763 }
6764 
6765 static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6766 					uint32_t pipe, bool enable)
6767 {
6768 	struct amdgpu_device *adev = ring->adev;
6769 	uint32_t val;
6770 	uint32_t wcl_cs_reg;
6771 
6772 	/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
6773 	val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
6774 
6775 	switch (pipe) {
6776 	case 0:
6777 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
6778 		break;
6779 	case 1:
6780 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
6781 		break;
6782 	case 2:
6783 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
6784 		break;
6785 	case 3:
6786 		wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
6787 		break;
6788 	default:
6789 		DRM_DEBUG("invalid pipe %d\n", pipe);
6790 		return;
6791 	}
6792 
6793 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6794 
6795 }
6796 static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6797 {
6798 	struct amdgpu_device *adev = ring->adev;
6799 	uint32_t val;
6800 	int i;
6801 
6802 
6803 	/* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
6804 	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
6805 	 * around 25% of gpu resources.
6806 	 */
6807 	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6808 	amdgpu_ring_emit_wreg(ring,
6809 			      SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
6810 			      val);
6811 
6812 	/* Restrict waves for normal/low priority compute queues as well
6813 	 * to get best QoS for high priority compute jobs.
6814 	 *
6815 	 * amdgpu controls only 1st ME(0-3 CS pipes).
6816 	 */
6817 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6818 		if (i != ring->pipe)
6819 			gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
6820 
6821 	}
6822 }
6823 
6824 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6825 	.name = "gfx_v9_0",
6826 	.early_init = gfx_v9_0_early_init,
6827 	.late_init = gfx_v9_0_late_init,
6828 	.sw_init = gfx_v9_0_sw_init,
6829 	.sw_fini = gfx_v9_0_sw_fini,
6830 	.hw_init = gfx_v9_0_hw_init,
6831 	.hw_fini = gfx_v9_0_hw_fini,
6832 	.suspend = gfx_v9_0_suspend,
6833 	.resume = gfx_v9_0_resume,
6834 	.is_idle = gfx_v9_0_is_idle,
6835 	.wait_for_idle = gfx_v9_0_wait_for_idle,
6836 	.soft_reset = gfx_v9_0_soft_reset,
6837 	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
6838 	.set_powergating_state = gfx_v9_0_set_powergating_state,
6839 	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
6840 };
6841 
6842 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6843 	.type = AMDGPU_RING_TYPE_GFX,
6844 	.align_mask = 0xff,
6845 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6846 	.support_64bit_ptrs = true,
6847 	.secure_submission_supported = true,
6848 	.vmhub = AMDGPU_GFXHUB_0,
6849 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6850 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6851 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6852 	.emit_frame_size = /* totally 242 maximum if 16 IBs */
6853 		5 +  /* COND_EXEC */
6854 		7 +  /* PIPELINE_SYNC */
6855 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6856 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6857 		2 + /* VM_FLUSH */
6858 		8 +  /* FENCE for VM_FLUSH */
6859 		20 + /* GDS switch */
6860 		4 + /* double SWITCH_BUFFER,
6861 		       the first COND_EXEC jump to the place just
6862 			   prior to this double SWITCH_BUFFER  */
6863 		5 + /* COND_EXEC */
6864 		7 +	 /*	HDP_flush */
6865 		4 +	 /*	VGT_flush */
6866 		14 + /*	CE_META */
6867 		31 + /*	DE_META */
6868 		3 + /* CNTX_CTRL */
6869 		5 + /* HDP_INVL */
6870 		8 + 8 + /* FENCE x2 */
6871 		2 + /* SWITCH_BUFFER */
6872 		7, /* gfx_v9_0_emit_mem_sync */
6873 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
6874 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6875 	.emit_fence = gfx_v9_0_ring_emit_fence,
6876 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6877 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6878 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6879 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6880 	.test_ring = gfx_v9_0_ring_test_ring,
6881 	.test_ib = gfx_v9_0_ring_test_ib,
6882 	.insert_nop = amdgpu_ring_insert_nop,
6883 	.pad_ib = amdgpu_ring_generic_pad_ib,
6884 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
6885 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6886 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6887 	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6888 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6889 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
6890 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6891 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6892 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
6893 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
6894 };
6895 
6896 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6897 	.type = AMDGPU_RING_TYPE_COMPUTE,
6898 	.align_mask = 0xff,
6899 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6900 	.support_64bit_ptrs = true,
6901 	.vmhub = AMDGPU_GFXHUB_0,
6902 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
6903 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
6904 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
6905 	.emit_frame_size =
6906 		20 + /* gfx_v9_0_ring_emit_gds_switch */
6907 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
6908 		5 + /* hdp invalidate */
6909 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6910 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6911 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6912 		2 + /* gfx_v9_0_ring_emit_vm_flush */
6913 		8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6914 		7 + /* gfx_v9_0_emit_mem_sync */
6915 		5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
6916 		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
6917 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
6918 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
6919 	.emit_fence = gfx_v9_0_ring_emit_fence,
6920 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6921 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6922 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6923 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6924 	.test_ring = gfx_v9_0_ring_test_ring,
6925 	.test_ib = gfx_v9_0_ring_test_ib,
6926 	.insert_nop = amdgpu_ring_insert_nop,
6927 	.pad_ib = amdgpu_ring_generic_pad_ib,
6928 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
6929 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6930 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6931 	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
6932 	.emit_wave_limit = gfx_v9_0_emit_wave_limit,
6933 };
6934 
6935 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6936 	.type = AMDGPU_RING_TYPE_KIQ,
6937 	.align_mask = 0xff,
6938 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
6939 	.support_64bit_ptrs = true,
6940 	.vmhub = AMDGPU_GFXHUB_0,
6941 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
6942 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
6943 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
6944 	.emit_frame_size =
6945 		20 + /* gfx_v9_0_ring_emit_gds_switch */
6946 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
6947 		5 + /* hdp invalidate */
6948 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6949 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6950 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6951 		2 + /* gfx_v9_0_ring_emit_vm_flush */
6952 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6953 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
6954 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6955 	.test_ring = gfx_v9_0_ring_test_ring,
6956 	.insert_nop = amdgpu_ring_insert_nop,
6957 	.pad_ib = amdgpu_ring_generic_pad_ib,
6958 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
6959 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
6960 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6961 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6962 };
6963 
6964 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6965 {
6966 	int i;
6967 
6968 	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6969 
6970 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6971 		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6972 
6973 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
6974 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6975 }
6976 
6977 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6978 	.set = gfx_v9_0_set_eop_interrupt_state,
6979 	.process = gfx_v9_0_eop_irq,
6980 };
6981 
6982 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6983 	.set = gfx_v9_0_set_priv_reg_fault_state,
6984 	.process = gfx_v9_0_priv_reg_irq,
6985 };
6986 
6987 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6988 	.set = gfx_v9_0_set_priv_inst_fault_state,
6989 	.process = gfx_v9_0_priv_inst_irq,
6990 };
6991 
6992 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6993 	.set = gfx_v9_0_set_cp_ecc_error_state,
6994 	.process = amdgpu_gfx_cp_ecc_error_irq,
6995 };
6996 
6997 
6998 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6999 {
7000 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7001 	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
7002 
7003 	adev->gfx.priv_reg_irq.num_types = 1;
7004 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
7005 
7006 	adev->gfx.priv_inst_irq.num_types = 1;
7007 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
7008 
7009 	adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
7010 	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
7011 }
7012 
7013 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
7014 {
7015 	switch (adev->ip_versions[GC_HWIP][0]) {
7016 	case IP_VERSION(9, 0, 1):
7017 	case IP_VERSION(9, 2, 1):
7018 	case IP_VERSION(9, 4, 0):
7019 	case IP_VERSION(9, 2, 2):
7020 	case IP_VERSION(9, 1, 0):
7021 	case IP_VERSION(9, 4, 1):
7022 	case IP_VERSION(9, 3, 0):
7023 	case IP_VERSION(9, 4, 2):
7024 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
7025 		break;
7026 	default:
7027 		break;
7028 	}
7029 }
7030 
7031 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
7032 {
7033 	/* init asci gds info */
7034 	switch (adev->ip_versions[GC_HWIP][0]) {
7035 	case IP_VERSION(9, 0, 1):
7036 	case IP_VERSION(9, 2, 1):
7037 	case IP_VERSION(9, 4, 0):
7038 		adev->gds.gds_size = 0x10000;
7039 		break;
7040 	case IP_VERSION(9, 2, 2):
7041 	case IP_VERSION(9, 1, 0):
7042 	case IP_VERSION(9, 4, 1):
7043 		adev->gds.gds_size = 0x1000;
7044 		break;
7045 	case IP_VERSION(9, 4, 2):
7046 		/* aldebaran removed all the GDS internal memory,
7047 		 * only support GWS opcode in kernel, like barrier
7048 		 * semaphore.etc */
7049 		adev->gds.gds_size = 0;
7050 		break;
7051 	default:
7052 		adev->gds.gds_size = 0x10000;
7053 		break;
7054 	}
7055 
7056 	switch (adev->ip_versions[GC_HWIP][0]) {
7057 	case IP_VERSION(9, 0, 1):
7058 	case IP_VERSION(9, 4, 0):
7059 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7060 		break;
7061 	case IP_VERSION(9, 2, 1):
7062 		adev->gds.gds_compute_max_wave_id = 0x27f;
7063 		break;
7064 	case IP_VERSION(9, 2, 2):
7065 	case IP_VERSION(9, 1, 0):
7066 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
7067 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
7068 		else
7069 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
7070 		break;
7071 	case IP_VERSION(9, 4, 1):
7072 		adev->gds.gds_compute_max_wave_id = 0xfff;
7073 		break;
7074 	case IP_VERSION(9, 4, 2):
7075 		/* deprecated for Aldebaran, no usage at all */
7076 		adev->gds.gds_compute_max_wave_id = 0;
7077 		break;
7078 	default:
7079 		/* this really depends on the chip */
7080 		adev->gds.gds_compute_max_wave_id = 0x7ff;
7081 		break;
7082 	}
7083 
7084 	adev->gds.gws_size = 64;
7085 	adev->gds.oa_size = 16;
7086 }
7087 
7088 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7089 						 u32 bitmap)
7090 {
7091 	u32 data;
7092 
7093 	if (!bitmap)
7094 		return;
7095 
7096 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7097 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7098 
7099 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
7100 }
7101 
7102 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7103 {
7104 	u32 data, mask;
7105 
7106 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
7107 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
7108 
7109 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7110 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7111 
7112 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7113 
7114 	return (~data) & mask;
7115 }
7116 
7117 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
7118 				 struct amdgpu_cu_info *cu_info)
7119 {
7120 	int i, j, k, counter, active_cu_number = 0;
7121 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
7122 	unsigned disable_masks[4 * 4];
7123 
7124 	if (!adev || !cu_info)
7125 		return -EINVAL;
7126 
7127 	/*
7128 	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
7129 	 */
7130 	if (adev->gfx.config.max_shader_engines *
7131 		adev->gfx.config.max_sh_per_se > 16)
7132 		return -EINVAL;
7133 
7134 	amdgpu_gfx_parse_disable_cu(disable_masks,
7135 				    adev->gfx.config.max_shader_engines,
7136 				    adev->gfx.config.max_sh_per_se);
7137 
7138 	mutex_lock(&adev->grbm_idx_mutex);
7139 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7140 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7141 			mask = 1;
7142 			ao_bitmap = 0;
7143 			counter = 0;
7144 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
7145 			gfx_v9_0_set_user_cu_inactive_bitmap(
7146 				adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
7147 			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
7148 
7149 			/*
7150 			 * The bitmap(and ao_cu_bitmap) in cu_info structure is
7151 			 * 4x4 size array, and it's usually suitable for Vega
7152 			 * ASICs which has 4*2 SE/SH layout.
7153 			 * But for Arcturus, SE/SH layout is changed to 8*1.
7154 			 * To mostly reduce the impact, we make it compatible
7155 			 * with current bitmap array as below:
7156 			 *    SE4,SH0 --> bitmap[0][1]
7157 			 *    SE5,SH0 --> bitmap[1][1]
7158 			 *    SE6,SH0 --> bitmap[2][1]
7159 			 *    SE7,SH0 --> bitmap[3][1]
7160 			 */
7161 			cu_info->bitmap[i % 4][j + i / 4] = bitmap;
7162 
7163 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
7164 				if (bitmap & mask) {
7165 					if (counter < adev->gfx.config.max_cu_per_sh)
7166 						ao_bitmap |= mask;
7167 					counter ++;
7168 				}
7169 				mask <<= 1;
7170 			}
7171 			active_cu_number += counter;
7172 			if (i < 2 && j < 2)
7173 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7174 			cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7175 		}
7176 	}
7177 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7178 	mutex_unlock(&adev->grbm_idx_mutex);
7179 
7180 	cu_info->number = active_cu_number;
7181 	cu_info->ao_cu_mask = ao_cu_mask;
7182 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7183 
7184 	return 0;
7185 }
7186 
7187 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7188 {
7189 	.type = AMD_IP_BLOCK_TYPE_GFX,
7190 	.major = 9,
7191 	.minor = 0,
7192 	.rev = 0,
7193 	.funcs = &gfx_v9_0_ip_funcs,
7194 };
7195