xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (revision 22d55f02)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

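/* Apply the per-ASIC golden register settings, then the common GC 9.x list. */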
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

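/*
 * Emit a WRITE_DATA packet on @ring that writes @val to register @reg on the
 * selected engine, optionally requesting write confirmation (@wc).
 */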
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

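/*
 * Emit a WAIT_REG_MEM packet that polls a register or memory location
 * (selected by @mem_space) until (value & @mask) == @ref.
 */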
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

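/*
 * Basic ring sanity test: write 0xDEADBEEF to a scratch register through the
 * ring and poll the register until the value lands or the timeout expires.
 */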
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

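/*
 * Indirect buffer test: submit an IB that writes 0xDEADBEEF to a writeback
 * slot and wait on its fence to confirm the CP executed it.
 */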
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

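/* Parse the v2.1 RLC header for the save/restore list cntl/GPM/SRM blobs. */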
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

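/*
 * Check whether the loaded ME/PFP/MEC firmware is recent enough to support
 * the combined register write-then-wait operation.
 */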
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}

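/*
 * Disable GFXOFF on early Raven parts whose RLC firmware is too old to
 * support it reliably.
 */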
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
			break;
		if ((adev->gfx.rlc_fw_version != 106 &&
		     adev->gfx.rlc_fw_version < 531) ||
		    (adev->gfx.rlc_fw_version == 53815) ||
		    (adev->gfx.rlc_feature_version < 1) ||
		    !adev->gfx.rlc.is_rlc_v2_1)
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}

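/*
 * Fetch and validate the PFP/ME/CE/RLC/MEC microcode for the current ASIC
 * and, when PSP loading is used, register each blob in the ucode list.
 */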
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;
	uint32_t smu_version;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
	 * instead of picasso_rlc.bin.
	 * How to tell the boards apart by PCI revision:
	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
	 *          or revision >= 0xD8 && revision <= 0xDF
	 * otherwise it is PCO FP5
	 */
	if (!strcmp(chip_name, "picasso") &&
		(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
		((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
	else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
		(smu_version >= 0x41e2b))
		/*
		 * The SMC is loaded by the SBIOS on APUs, so the SMU version
		 * can be queried directly.
		 */
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	gfx_v9_0_check_if_need_gfxoff(adev);
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

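/* Compute the clear state buffer size in dwords from the gfx9 CS data. */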
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

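/* Fill @buffer with the PM4 packets that program the clear state. */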
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

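/*
 * Build and program the per-SE/SH always-on CU bitmaps consumed by the RLC
 * for load balancing and powergating.
 */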
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (cu_info->bitmap[i][j] & mask) {
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

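/* Program the RLC load-balancing (LBPW) thresholds and parameters (Raven). */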
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	return 5;
}

static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		gfx_v9_0_init_lbpw(adev);
		break;
	case CHIP_VEGA20:
		gfx_v9_4_init_lbpw(adev);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

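/*
 * Allocate the MEC HPD EOP buffer in VRAM and stage the MEC firmware in a
 * GTT buffer object.
 */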
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

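/* Read one wave-indexed SQ register via the SQ_IND_INDEX/SQ_IND_DATA pair. */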
1243 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1244 {
1245 	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1246 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1247 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1248 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
1249 		(SQ_IND_INDEX__FORCE_READ_MASK));
1250 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1251 }
1252 
1253 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1254 			   uint32_t wave, uint32_t thread,
1255 			   uint32_t regno, uint32_t num, uint32_t *out)
1256 {
1257 	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1258 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1259 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1260 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
1261 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1262 		(SQ_IND_INDEX__FORCE_READ_MASK) |
1263 		(SQ_IND_INDEX__AUTO_INCR_MASK));
1264 	while (num--)
1265 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1266 }
1267 
1268 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1269 {
1270 	/* type 1 wave data */
1271 	dst[(*no_fields)++] = 1;
1272 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1273 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1274 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1275 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1276 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1277 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1278 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1279 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1280 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1281 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1282 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1283 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1284 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1285 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1286 }
1287 
1288 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1289 				     uint32_t wave, uint32_t start,
1290 				     uint32_t size, uint32_t *dst)
1291 {
1292 	wave_read_regs(
1293 		adev, simd, wave, 0,
1294 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1295 }
1296 
1297 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1298 				     uint32_t wave, uint32_t thread,
1299 				     uint32_t start, uint32_t size,
1300 				     uint32_t *dst)
1301 {
1302 	wave_read_regs(
1303 		adev, simd, wave, thread,
1304 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1305 }
1306 
1307 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1308 				  u32 me, u32 pipe, u32 q)
1309 {
1310 	soc15_grbm_select(adev, me, pipe, q, 0);
1311 }
1312 
1313 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1314 	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1315 	.select_se_sh = &gfx_v9_0_select_se_sh,
1316 	.read_wave_data = &gfx_v9_0_read_wave_data,
1317 	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1318 	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1319 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
1320 };
1321 
1322 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1323 {
1324 	u32 gb_addr_config;
1325 	int err;
1326 
1327 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1328 
1329 	switch (adev->asic_type) {
1330 	case CHIP_VEGA10:
1331 		adev->gfx.config.max_hw_contexts = 8;
1332 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1333 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1334 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1335 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1336 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1337 		break;
1338 	case CHIP_VEGA12:
1339 		adev->gfx.config.max_hw_contexts = 8;
1340 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1341 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1342 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1343 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1344 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1345 		DRM_INFO("fix gfx.config for vega12\n");
1346 		break;
1347 	case CHIP_VEGA20:
1348 		adev->gfx.config.max_hw_contexts = 8;
1349 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1350 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1351 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1352 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1353 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1354 		gb_addr_config &= ~0xf3e777ff;
1355 		gb_addr_config |= 0x22014042;
1356 		/* check vbios table if gpu info is not available */
1357 		err = amdgpu_atomfirmware_get_gfx_info(adev);
1358 		if (err)
1359 			return err;
1360 		break;
1361 	case CHIP_RAVEN:
1362 		adev->gfx.config.max_hw_contexts = 8;
1363 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1364 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1365 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1366 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1367 		if (adev->rev_id >= 8)
1368 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
1369 		else
1370 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1371 		break;
1372 	default:
1373 		BUG();
1374 		break;
1375 	}
1376 
1377 	adev->gfx.config.gb_addr_config = gb_addr_config;
1378 
1379 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1380 			REG_GET_FIELD(
1381 					adev->gfx.config.gb_addr_config,
1382 					GB_ADDR_CONFIG,
1383 					NUM_PIPES);
1384 
1385 	adev->gfx.config.max_tile_pipes =
1386 		adev->gfx.config.gb_addr_config_fields.num_pipes;
1387 
1388 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1389 			REG_GET_FIELD(
1390 					adev->gfx.config.gb_addr_config,
1391 					GB_ADDR_CONFIG,
1392 					NUM_BANKS);
1393 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1394 			REG_GET_FIELD(
1395 					adev->gfx.config.gb_addr_config,
1396 					GB_ADDR_CONFIG,
1397 					MAX_COMPRESSED_FRAGS);
1398 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1399 			REG_GET_FIELD(
1400 					adev->gfx.config.gb_addr_config,
1401 					GB_ADDR_CONFIG,
1402 					NUM_RB_PER_SE);
1403 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1404 			REG_GET_FIELD(
1405 					adev->gfx.config.gb_addr_config,
1406 					GB_ADDR_CONFIG,
1407 					NUM_SHADER_ENGINES);
1408 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1409 			REG_GET_FIELD(
1410 					adev->gfx.config.gb_addr_config,
1411 					GB_ADDR_CONFIG,
1412 					PIPE_INTERLEAVE_SIZE));
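	/*
	 * All GB_ADDR_CONFIG fields are log2-encoded, so each decode above
	 * yields a power of two.  E.g. a NUM_PIPES field of 2 decodes to
	 * 1 << 2 = 4 pipes, and a PIPE_INTERLEAVE_SIZE field of 0 decodes
	 * to 1 << (8 + 0) = 256 bytes.
	 */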
1413 
1414 	return 0;
1415 }
1416 
1417 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1418 				   struct amdgpu_ngg_buf *ngg_buf,
1419 				   int size_se,
1420 				   int default_size_se)
1421 {
1422 	int r;
1423 
1424 	if (size_se < 0) {
1425 		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1426 		return -EINVAL;
1427 	}
1428 	size_se = size_se ? size_se : default_size_se;
1429 
1430 	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1431 	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1432 				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1433 				    &ngg_buf->bo,
1434 				    &ngg_buf->gpu_addr,
1435 				    NULL);
1436 	if (r) {
1437 		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1438 		return r;
1439 	}
1440 	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1441 
1442 	return r;
1443 }
1444 
1445 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1446 {
1447 	int i;
1448 
1449 	for (i = 0; i < NGG_BUF_MAX; i++)
1450 		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1451 				      &adev->gfx.ngg.buf[i].gpu_addr,
1452 				      NULL);
1453 
1454 	memset(&adev->gfx.ngg.buf[0], 0,
1455 			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
1456 
1457 	adev->gfx.ngg.init = false;
1458 
1459 	return 0;
1460 }
1461 
1462 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
1463 {
1464 	int r;
1465 
1466 	if (!amdgpu_ngg || adev->gfx.ngg.init)
1467 		return 0;
1468 
1469 	/* GDS reserve memory: 64 bytes alignment */
1470 	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
1471 	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
1472 	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
1473 	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
1474 	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
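	/*
	 * The reservation lives immediately after the VMID0 GDS range
	 * (base + size as read back above); total_size and the gfx
	 * partition size are reduced by the same amount above.
	 */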
1475 
1476 	/* Primitive Buffer */
1477 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
1478 				    amdgpu_prim_buf_per_se,
1479 				    64 * 1024);
1480 	if (r) {
1481 		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
1482 		goto err;
1483 	}
1484 
1485 	/* Position Buffer */
1486 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
1487 				    amdgpu_pos_buf_per_se,
1488 				    256 * 1024);
1489 	if (r) {
1490 		dev_err(adev->dev, "Failed to create Position Buffer\n");
1491 		goto err;
1492 	}
1493 
1494 	/* Control Sideband */
1495 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
1496 				    amdgpu_cntl_sb_buf_per_se,
1497 				    256);
1498 	if (r) {
1499 		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
1500 		goto err;
1501 	}
1502 
1503 	/* Parameter Cache, not created by default */
1504 	if (amdgpu_param_buf_per_se <= 0)
1505 		goto out;
1506 
1507 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
1508 				    amdgpu_param_buf_per_se,
1509 				    512 * 1024);
1510 	if (r) {
1511 		dev_err(adev->dev, "Failed to create Parameter Cache\n");
1512 		goto err;
1513 	}
1514 
1515 out:
1516 	adev->gfx.ngg.init = true;
1517 	return 0;
1518 err:
1519 	gfx_v9_0_ngg_fini(adev);
1520 	return r;
1521 }
1522 
1523 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1524 {
1525 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1526 	int r;
1527 	u32 data, base;
1528 
1529 	if (!amdgpu_ngg)
1530 		return 0;
1531 
1532 	/* Program buffer size */
1533 	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1534 			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1535 	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1536 			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
1537 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1538 
1539 	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1540 			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1541 	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1542 			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1543 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1544 
1545 	/* Program buffer base address */
1546 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1547 	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1548 	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1549 
1550 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1551 	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1552 	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1553 
1554 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1555 	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1556 	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1557 
1558 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1559 	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1560 	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1561 
1562 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1563 	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1564 	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1565 
1566 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1567 	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1568 	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1569 
1570 	/* Clear GDS reserved memory */
1571 	r = amdgpu_ring_alloc(ring, 17);
1572 	if (r) {
1573 		DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
1574 			  ring->name, r);
1575 		return r;
1576 	}
1577 
1578 	gfx_v9_0_write_data_to_reg(ring, 0, false,
1579 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1580 				    (adev->gds.mem.total_size +
1581 				    adev->gfx.ngg.gds_reserve_size));
1582 
1583 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1584 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1585 				PACKET3_DMA_DATA_DST_SEL(1) |
1586 				PACKET3_DMA_DATA_SRC_SEL(2)));
1587 	amdgpu_ring_write(ring, 0);
1588 	amdgpu_ring_write(ring, 0);
1589 	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1590 	amdgpu_ring_write(ring, 0);
1591 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1592 				adev->gfx.ngg.gds_reserve_size);
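	/*
	 * Per the PACKET3_DMA_DATA encoding, DST_SEL(1) selects GDS as the
	 * destination and SRC_SEL(2) uses the inline data dword (zero here)
	 * as the fill source, so this packet zeroes the reservation; the
	 * final dword combines the byte count (gds_reserve_size) with the
	 * RAW_WAIT ordering flag.
	 */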
1593 
1594 	gfx_v9_0_write_data_to_reg(ring, 0, false,
1595 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1596 
1597 	amdgpu_ring_commit(ring);
1598 
1599 	return 0;
1600 }
1601 
1602 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1603 				      int mec, int pipe, int queue)
1604 {
1605 	int r;
1606 	unsigned irq_type;
1607 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1608 
1611 	/* mec0 is me1 */
1612 	ring->me = mec + 1;
1613 	ring->pipe = pipe;
1614 	ring->queue = queue;
1615 
1616 	ring->ring_obj = NULL;
1617 	ring->use_doorbell = true;
1618 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
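	/*
	 * The << 1 appears to convert the 64-bit doorbell slot number kept
	 * in adev->doorbell_index into the 32-bit dword index expected in
	 * ring->doorbell_index (the gfx ring below shifts the same way).
	 */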
1619 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1620 				+ (ring_id * GFX9_MEC_HPD_SIZE);
1621 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1622 
1623 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1624 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1625 		+ ring->pipe;
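	/*
	 * EOP interrupt sources are laid out one per pipe, MEC1 pipe0..3
	 * followed by MEC2 pipe0..3, hence base + (me - 1) * pipes + pipe.
	 */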
1626 
1627 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1628 	r = amdgpu_ring_init(adev, ring, 1024,
1629 			     &adev->gfx.eop_irq, irq_type);
1630 	if (r)
1631 		return r;
1632 
1633 
1634 	return 0;
1635 }
1636 
1637 static int gfx_v9_0_sw_init(void *handle)
1638 {
1639 	int i, j, k, r, ring_id;
1640 	struct amdgpu_ring *ring;
1641 	struct amdgpu_kiq *kiq;
1642 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1643 
1644 	switch (adev->asic_type) {
1645 	case CHIP_VEGA10:
1646 	case CHIP_VEGA12:
1647 	case CHIP_VEGA20:
1648 	case CHIP_RAVEN:
1649 		adev->gfx.mec.num_mec = 2;
1650 		break;
1651 	default:
1652 		adev->gfx.mec.num_mec = 1;
1653 		break;
1654 	}
1655 
1656 	adev->gfx.mec.num_pipe_per_mec = 4;
1657 	adev->gfx.mec.num_queue_per_pipe = 8;
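	/*
	 * With this topology a two-MEC part exposes at most
	 * 2 * 4 * 8 = 64 compute queues; only those set in the MEC queue
	 * bitmap actually get a ring below.
	 */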
1658 
1659 	/* EOP Event */
1660 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1661 	if (r)
1662 		return r;
1663 
1664 	/* Privileged reg */
1665 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1666 			      &adev->gfx.priv_reg_irq);
1667 	if (r)
1668 		return r;
1669 
1670 	/* Privileged inst */
1671 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1672 			      &adev->gfx.priv_inst_irq);
1673 	if (r)
1674 		return r;
1675 
1676 	/* ECC error */
1677 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
1678 			      &adev->gfx.cp_ecc_error_irq);
1679 	if (r)
1680 		return r;
1681 
1682 	/* FUE error */
1683 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
1684 			      &adev->gfx.cp_ecc_error_irq);
1685 	if (r)
1686 		return r;
1687 
1688 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1689 
1690 	gfx_v9_0_scratch_init(adev);
1691 
1692 	r = gfx_v9_0_init_microcode(adev);
1693 	if (r) {
1694 		DRM_ERROR("Failed to load gfx firmware!\n");
1695 		return r;
1696 	}
1697 
1698 	r = adev->gfx.rlc.funcs->init(adev);
1699 	if (r) {
1700 		DRM_ERROR("Failed to init rlc BOs!\n");
1701 		return r;
1702 	}
1703 
1704 	r = gfx_v9_0_mec_init(adev);
1705 	if (r) {
1706 		DRM_ERROR("Failed to init MEC BOs!\n");
1707 		return r;
1708 	}
1709 
1710 	/* set up the gfx ring */
1711 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1712 		ring = &adev->gfx.gfx_ring[i];
1713 		ring->ring_obj = NULL;
1714 		if (!i)
1715 			sprintf(ring->name, "gfx");
1716 		else
1717 			sprintf(ring->name, "gfx_%d", i);
1718 		ring->use_doorbell = true;
1719 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1720 		r = amdgpu_ring_init(adev, ring, 1024,
1721 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1722 		if (r)
1723 			return r;
1724 	}
1725 
1726 	/* set up the compute queues - allocate horizontally across pipes */
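	/*
	 * With the pipe loop innermost, rings 0..3 land on mec0 pipes 0..3
	 * queue 0 before ring 4 wraps back to pipe 0 queue 1 (subject to
	 * the queue bitmap check).
	 */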
1727 	ring_id = 0;
1728 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1729 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1730 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1731 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1732 					continue;
1733 
1734 				r = gfx_v9_0_compute_ring_init(adev,
1735 							       ring_id,
1736 							       i, k, j);
1737 				if (r)
1738 					return r;
1739 
1740 				ring_id++;
1741 			}
1742 		}
1743 	}
1744 
1745 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1746 	if (r) {
1747 		DRM_ERROR("Failed to init KIQ BOs!\n");
1748 		return r;
1749 	}
1750 
1751 	kiq = &adev->gfx.kiq;
1752 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1753 	if (r)
1754 		return r;
1755 
1756 	/* create MQD for all compute queues as well as KIQ for SRIOV case */
1757 	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1758 	if (r)
1759 		return r;
1760 
1761 	adev->gfx.ce_ram_size = 0x8000;
1762 
1763 	r = gfx_v9_0_gpu_early_init(adev);
1764 	if (r)
1765 		return r;
1766 
1767 	r = gfx_v9_0_ngg_init(adev);
1768 	if (r)
1769 		return r;
1770 
1771 	return 0;
1772 }
1773 
1774 
1775 static int gfx_v9_0_sw_fini(void *handle)
1776 {
1777 	int i;
1778 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1779 
1780 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
1781 			adev->gfx.ras_if) {
1782 		struct ras_common_if *ras_if = adev->gfx.ras_if;
1783 		struct ras_ih_if ih_info = {
1784 			.head = *ras_if,
1785 		};
1786 
1787 		amdgpu_ras_debugfs_remove(adev, ras_if);
1788 		amdgpu_ras_sysfs_remove(adev, ras_if);
1789 		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
1790 		amdgpu_ras_feature_enable(adev, ras_if, 0);
1791 		kfree(ras_if);
1792 	}
1793 
1794 	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1795 	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1796 	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1797 
1798 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1799 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1800 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1801 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1802 
1803 	amdgpu_gfx_compute_mqd_sw_fini(adev);
1804 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1805 	amdgpu_gfx_kiq_fini(adev);
1806 
1807 	gfx_v9_0_mec_fini(adev);
1808 	gfx_v9_0_ngg_fini(adev);
1809 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1810 				&adev->gfx.rlc.clear_state_gpu_addr,
1811 				(void **)&adev->gfx.rlc.cs_ptr);
1812 	if (adev->asic_type == CHIP_RAVEN) {
1813 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1814 				&adev->gfx.rlc.cp_table_gpu_addr,
1815 				(void **)&adev->gfx.rlc.cp_table_ptr);
1816 	}
1817 	gfx_v9_0_free_microcode(adev);
1818 
1819 	return 0;
1820 }
1821 
1822 
1823 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1824 {
1825 	/* TODO */
1826 }
1827 
1828 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1829 {
1830 	u32 data;
1831 
1832 	if (instance == 0xffffffff)
1833 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1834 	else
1835 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1836 
1837 	if (se_num == 0xffffffff)
1838 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1839 	else
1840 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1841 
1842 	if (sh_num == 0xffffffff)
1843 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1844 	else
1845 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1846 
1847 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1848 }
1849 
1850 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1851 {
1852 	u32 data, mask;
1853 
1854 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1855 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1856 
1857 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1858 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1859 
1860 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1861 					 adev->gfx.config.max_sh_per_se);
1862 
1863 	return (~data) & mask;
1864 }
1865 
1866 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1867 {
1868 	int i, j;
1869 	u32 data;
1870 	u32 active_rbs = 0;
1871 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1872 					adev->gfx.config.max_sh_per_se;
1873 
1874 	mutex_lock(&adev->grbm_idx_mutex);
1875 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1876 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1877 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1878 			data = gfx_v9_0_get_rb_active_bitmap(adev);
1879 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1880 					       rb_bitmap_width_per_sh);
1881 		}
1882 	}
1883 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1884 	mutex_unlock(&adev->grbm_idx_mutex);
1885 
1886 	adev->gfx.config.backend_enable_mask = active_rbs;
1887 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1888 }
1889 
1890 #define DEFAULT_SH_MEM_BASES	(0x6000)
1891 #define FIRST_COMPUTE_VMID	(8)
1892 #define LAST_COMPUTE_VMID	(16)
1893 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1894 {
1895 	int i;
1896 	uint32_t sh_mem_config;
1897 	uint32_t sh_mem_bases;
1898 
1899 	/*
1900 	 * Configure apertures:
1901 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1902 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1903 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (256TB)
1904 	 */
1905 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
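	/*
	 * SH_MEM_BASES packs PRIVATE_BASE in its low 16 bits and
	 * SHARED_BASE in its high 16 bits, each holding bits 63:48 of the
	 * aperture address, so 0x6000 places both apertures at
	 * 0x6000000000000000 as mapped out above.
	 */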
1906 
1907 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1908 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1909 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1910 
1911 	mutex_lock(&adev->srbm_mutex);
1912 	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1913 		soc15_grbm_select(adev, 0, 0, 0, i);
1914 		/* CP and shaders */
1915 		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1916 		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1917 	}
1918 	soc15_grbm_select(adev, 0, 0, 0, 0);
1919 	mutex_unlock(&adev->srbm_mutex);
1920 }
1921 
1922 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1923 {
1924 	u32 tmp;
1925 	int i;
1926 
1927 	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1928 
1929 	gfx_v9_0_tiling_mode_table_init(adev);
1930 
1931 	gfx_v9_0_setup_rb(adev);
1932 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1933 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1934 
1935 	/* XXX SH_MEM regs */
1936 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1937 	mutex_lock(&adev->srbm_mutex);
1938 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1939 		soc15_grbm_select(adev, 0, 0, 0, i);
1940 		/* CP and shaders */
1941 		if (i == 0) {
1942 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1943 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1944 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1945 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1946 		} else {
1947 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1948 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1949 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1950 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1951 				(adev->gmc.private_aperture_start >> 48));
1952 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1953 				(adev->gmc.shared_aperture_start >> 48));
1954 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1955 		}
1956 	}
1957 	soc15_grbm_select(adev, 0, 0, 0, 0);
1958 
1959 	mutex_unlock(&adev->srbm_mutex);
1960 
1961 	gfx_v9_0_init_compute_vmid(adev);
1962 
1963 	mutex_lock(&adev->grbm_idx_mutex);
1964 	/*
1965 	 * making sure that the following register writes will be broadcast
1966 	 * to all the shaders
1967 	 */
1968 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1969 
1970 	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1971 		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
1972 			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1973 		   (adev->gfx.config.sc_prim_fifo_size_backend <<
1974 			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1975 		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
1976 			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1977 		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1978 			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1979 	mutex_unlock(&adev->grbm_idx_mutex);
1980 
1981 }
1982 
1983 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1984 {
1985 	u32 i, j, k;
1986 	u32 mask;
1987 
1988 	mutex_lock(&adev->grbm_idx_mutex);
1989 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1990 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1991 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1992 			for (k = 0; k < adev->usec_timeout; k++) {
1993 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1994 					break;
1995 				udelay(1);
1996 			}
1997 			if (k == adev->usec_timeout) {
1998 				gfx_v9_0_select_se_sh(adev, 0xffffffff,
1999 						      0xffffffff, 0xffffffff);
2000 				mutex_unlock(&adev->grbm_idx_mutex);
2001 				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
2002 					 i, j);
2003 				return;
2004 			}
2005 		}
2006 	}
2007 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2008 	mutex_unlock(&adev->grbm_idx_mutex);
2009 
2010 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2011 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2012 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2013 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2014 	for (k = 0; k < adev->usec_timeout; k++) {
2015 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2016 			break;
2017 		udelay(1);
2018 	}
2019 }
2020 
2021 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2022 					       bool enable)
2023 {
2024 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2025 
2026 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2027 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2028 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2029 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2030 
2031 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2032 }
2033 
2034 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2035 {
2036 	/* csib */
2037 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2038 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2039 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2040 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2041 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2042 			adev->gfx.rlc.clear_state_size);
2043 }
2044 
2045 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2046 				int indirect_offset,
2047 				int list_size,
2048 				int *unique_indirect_regs,
2049 				int unique_indirect_reg_count,
2050 				int *indirect_start_offsets,
2051 				int *indirect_start_offsets_count,
2052 				int max_start_offsets_count)
2053 {
2054 	int idx;
2055 
2056 	for (; indirect_offset < list_size; indirect_offset++) {
2057 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2058 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2059 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2060 
2061 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2062 			indirect_offset += 2;
2063 
2064 			/* look for the matching index */
2065 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2066 				if (unique_indirect_regs[idx] ==
2067 					register_list_format[indirect_offset] ||
2068 					!unique_indirect_regs[idx])
2069 					break;
2070 			}
2071 
2072 			BUG_ON(idx >= unique_indirect_reg_count);
2073 
2074 			if (!unique_indirect_regs[idx])
2075 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2076 
2077 			indirect_offset++;
2078 		}
2079 	}
2080 }
2081 
2082 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2083 {
2084 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2085 	int unique_indirect_reg_count = 0;
2086 
2087 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2088 	int indirect_start_offsets_count = 0;
2089 
2090 	int list_size = 0;
2091 	int i = 0, j = 0;
2092 	u32 tmp = 0;
2093 
2094 	u32 *register_list_format =
2095 		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2096 	if (!register_list_format)
2097 		return -ENOMEM;
2098 	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2099 		adev->gfx.rlc.reg_list_format_size_bytes);
2100 
2101 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2102 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2103 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2104 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2105 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2106 				    unique_indirect_regs,
2107 				    unique_indirect_reg_count,
2108 				    indirect_start_offsets,
2109 				    &indirect_start_offsets_count,
2110 				    ARRAY_SIZE(indirect_start_offsets));
2111 
2112 	/* enable auto-increment in case it is disabled */
2113 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2114 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2115 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2116 
2117 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2118 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2119 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2120 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2121 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2122 			adev->gfx.rlc.register_restore[i]);
2123 
2124 	/* load indirect register */
2125 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2126 		adev->gfx.rlc.reg_list_format_start);
2127 
2128 	/* direct register portion */
2129 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2130 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2131 			register_list_format[i]);
2132 
2133 	/* indirect register portion */
2134 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2135 		if (register_list_format[i] == 0xFFFFFFFF) {
2136 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2137 			continue;
2138 		}
2139 
2140 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2141 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2142 
2143 		for (j = 0; j < unique_indirect_reg_count; j++) {
2144 			if (register_list_format[i] == unique_indirect_regs[j]) {
2145 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2146 				break;
2147 			}
2148 		}
2149 
2150 		BUG_ON(j >= unique_indirect_reg_count);
2151 
2152 		i++;
2153 	}
2154 
2155 	/* set save/restore list size */
2156 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2157 	list_size = list_size >> 1;
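	/*
	 * The list appears to be stored as register/value pairs, so the
	 * entry count programmed below is half the dword count.
	 */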
2158 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2159 		adev->gfx.rlc.reg_restore_list_size);
2160 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2161 
2162 	/* write the starting offsets to RLC scratch ram */
2163 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2164 		adev->gfx.rlc.starting_offsets_start);
2165 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2166 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2167 		       indirect_start_offsets[i]);
2168 
2169 	/* load unique indirect regs */
2170 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2171 		if (unique_indirect_regs[i] != 0) {
2172 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2173 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2174 			       unique_indirect_regs[i] & 0x3FFFF);
2175 
2176 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2177 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2178 			       unique_indirect_regs[i] >> 20);
2179 		}
2180 	}
2181 
2182 	kfree(register_list_format);
2183 	return 0;
2184 }
2185 
2186 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2187 {
2188 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2189 }
2190 
2191 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2192 					     bool enable)
2193 {
2194 	uint32_t data = 0;
2195 	uint32_t default_data = 0;
2196 
2197 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2198 	if (enable) {
2199 		/* enable GFXIP control over CGPG */
2200 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2201 		if (default_data != data)
2202 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2203 
2204 		/* update status */
2205 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2206 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2207 		if (default_data != data)
2208 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2209 	} else {
2210 		/* restore GFXIP control over CGPG */
2211 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2212 		if (default_data != data)
2213 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2214 	}
2215 }
2216 
2217 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2218 {
2219 	uint32_t data = 0;
2220 
2221 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2222 			      AMD_PG_SUPPORT_GFX_SMG |
2223 			      AMD_PG_SUPPORT_GFX_DMG)) {
2224 		/* init IDLE_POLL_COUNT = 60 */
2225 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2226 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2227 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2228 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2229 
2230 		/* init RLC PG Delay */
2231 		data = 0;
2232 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2233 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2234 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2235 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2236 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2237 
2238 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2239 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2240 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2241 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2242 
2243 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2244 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2245 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2246 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2247 
2248 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2249 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2250 
2251 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2252 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2253 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2254 
2255 		pwr_10_0_gfxip_control_over_cgpg(adev, true);
2256 	}
2257 }
2258 
2259 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2260 						bool enable)
2261 {
2262 	uint32_t data = 0;
2263 	uint32_t default_data = 0;
2264 
2265 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2266 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2267 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2268 			     enable ? 1 : 0);
2269 	if (default_data != data)
2270 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2271 }
2272 
2273 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2274 						bool enable)
2275 {
2276 	uint32_t data = 0;
2277 	uint32_t default_data = 0;
2278 
2279 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2280 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2281 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2282 			     enable ? 1 : 0);
2283 	if (default_data != data)
2284 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2285 }
2286 
2287 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2288 					bool enable)
2289 {
2290 	uint32_t data = 0;
2291 	uint32_t default_data = 0;
2292 
2293 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2294 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2295 			     CP_PG_DISABLE,
2296 			     enable ? 0 : 1);
2297 	if (default_data != data)
2298 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2299 }
2300 
2301 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2302 						bool enable)
2303 {
2304 	uint32_t data, default_data;
2305 
2306 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2307 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2308 			     GFX_POWER_GATING_ENABLE,
2309 			     enable ? 1 : 0);
2310 	if (default_data != data)
2311 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2312 }
2313 
2314 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2315 						bool enable)
2316 {
2317 	uint32_t data, default_data;
2318 
2319 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2320 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2321 			     GFX_PIPELINE_PG_ENABLE,
2322 			     enable ? 1 : 0);
2323 	if (default_data != data)
2324 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2325 
2326 	if (!enable)
2327 		/* read any GFX register to wake up GFX */
2328 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2329 }
2330 
2331 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2332 						       bool enable)
2333 {
2334 	uint32_t data, default_data;
2335 
2336 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2337 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2338 			     STATIC_PER_CU_PG_ENABLE,
2339 			     enable ? 1 : 0);
2340 	if (default_data != data)
2341 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2342 }
2343 
2344 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2345 						bool enable)
2346 {
2347 	uint32_t data, default_data;
2348 
2349 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2350 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2351 			     DYN_PER_CU_PG_ENABLE,
2352 			     enable ? 1 : 0);
2353 	if (default_data != data)
2354 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2355 }
2356 
2357 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2358 {
2359 	gfx_v9_0_init_csb(adev);
2360 
2361 	/*
2362 	 * The RLC save/restore list is available since RLC v2_1
2363 	 * and is needed by the gfxoff feature.
2364 	 */
2365 	if (adev->gfx.rlc.is_rlc_v2_1) {
2366 		gfx_v9_1_init_rlc_save_restore_list(adev);
2367 		gfx_v9_0_enable_save_restore_machine(adev);
2368 	}
2369 
2370 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2371 			      AMD_PG_SUPPORT_GFX_SMG |
2372 			      AMD_PG_SUPPORT_GFX_DMG |
2373 			      AMD_PG_SUPPORT_CP |
2374 			      AMD_PG_SUPPORT_GDS |
2375 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
2376 		WREG32(mmRLC_JUMP_TABLE_RESTORE,
2377 		       adev->gfx.rlc.cp_table_gpu_addr >> 8);
2378 		gfx_v9_0_init_gfx_power_gating(adev);
2379 	}
2380 }
2381 
2382 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2383 {
2384 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2385 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2386 	gfx_v9_0_wait_for_rlc_serdes(adev);
2387 }
2388 
2389 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2390 {
2391 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2392 	udelay(50);
2393 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2394 	udelay(50);
2395 }
2396 
2397 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2398 {
2399 #ifdef AMDGPU_RLC_DEBUG_RETRY
2400 	u32 rlc_ucode_ver;
2401 #endif
2402 
2403 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2404 	udelay(50);
2405 
2406 	/* APUs (e.g. Carrizo) enable the CP interrupt only after the CP is initialized */
2407 	if (!(adev->flags & AMD_IS_APU)) {
2408 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2409 		udelay(50);
2410 	}
2411 
2412 #ifdef AMDGPU_RLC_DEBUG_RETRY
2413 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
2414 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2415 	if (rlc_ucode_ver == 0x108) {
2416 		DRM_INFO("Using RLC debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2417 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
2418 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2419 		 * default is 0x9C4 to create a 100us interval */
2420 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2421 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2422 		 * to disable the page fault retry interrupts, default is
2423 		 * 0x100 (256) */
2424 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2425 	}
2426 #endif
2427 }
2428 
2429 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2430 {
2431 	const struct rlc_firmware_header_v2_0 *hdr;
2432 	const __le32 *fw_data;
2433 	unsigned i, fw_size;
2434 
2435 	if (!adev->gfx.rlc_fw)
2436 		return -EINVAL;
2437 
2438 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2439 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2440 
2441 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2442 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2443 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2444 
2445 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2446 			RLCG_UCODE_LOADING_START_ADDRESS);
2447 	for (i = 0; i < fw_size; i++)
2448 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2449 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2450 
2451 	return 0;
2452 }
2453 
2454 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2455 {
2456 	int r;
2457 
2458 	if (amdgpu_sriov_vf(adev)) {
2459 		gfx_v9_0_init_csb(adev);
2460 		return 0;
2461 	}
2462 
2463 	adev->gfx.rlc.funcs->stop(adev);
2464 
2465 	/* disable CG */
2466 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2467 
2468 	gfx_v9_0_init_pg(adev);
2469 
2470 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2471 		/* legacy rlc firmware loading */
2472 		r = gfx_v9_0_rlc_load_microcode(adev);
2473 		if (r)
2474 			return r;
2475 	}
2476 
2477 	switch (adev->asic_type) {
2478 	case CHIP_RAVEN:
2479 		if (amdgpu_lbpw == 0)
2480 			gfx_v9_0_enable_lbpw(adev, false);
2481 		else
2482 			gfx_v9_0_enable_lbpw(adev, true);
2483 		break;
2484 	case CHIP_VEGA20:
2485 		if (amdgpu_lbpw > 0)
2486 			gfx_v9_0_enable_lbpw(adev, true);
2487 		else
2488 			gfx_v9_0_enable_lbpw(adev, false);
2489 		break;
2490 	default:
2491 		break;
2492 	}
2493 
2494 	adev->gfx.rlc.funcs->start(adev);
2495 
2496 	return 0;
2497 }
2498 
2499 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2500 {
2501 	int i;
2502 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2503 
2504 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2505 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2506 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2507 	if (!enable) {
2508 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2509 			adev->gfx.gfx_ring[i].sched.ready = false;
2510 	}
2511 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2512 	udelay(50);
2513 }
2514 
2515 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2516 {
2517 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2518 	const struct gfx_firmware_header_v1_0 *ce_hdr;
2519 	const struct gfx_firmware_header_v1_0 *me_hdr;
2520 	const __le32 *fw_data;
2521 	unsigned i, fw_size;
2522 
2523 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2524 		return -EINVAL;
2525 
2526 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2527 		adev->gfx.pfp_fw->data;
2528 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2529 		adev->gfx.ce_fw->data;
2530 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2531 		adev->gfx.me_fw->data;
2532 
2533 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2534 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2535 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2536 
2537 	gfx_v9_0_cp_gfx_enable(adev, false);
2538 
2539 	/* PFP */
2540 	fw_data = (const __le32 *)
2541 		(adev->gfx.pfp_fw->data +
2542 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2543 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2544 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2545 	for (i = 0; i < fw_size; i++)
2546 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2547 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2548 
2549 	/* CE */
2550 	fw_data = (const __le32 *)
2551 		(adev->gfx.ce_fw->data +
2552 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2553 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2554 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2555 	for (i = 0; i < fw_size; i++)
2556 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2557 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2558 
2559 	/* ME */
2560 	fw_data = (const __le32 *)
2561 		(adev->gfx.me_fw->data +
2562 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2563 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2564 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2565 	for (i = 0; i < fw_size; i++)
2566 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2567 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2568 
2569 	return 0;
2570 }
2571 
2572 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2573 {
2574 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2575 	const struct cs_section_def *sect = NULL;
2576 	const struct cs_extent_def *ext = NULL;
2577 	int r, i, tmp;
2578 
2579 	/* init the CP */
2580 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2581 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2582 
2583 	gfx_v9_0_cp_gfx_enable(adev, true);
2584 
2585 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2586 	if (r) {
2587 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2588 		return r;
2589 	}
2590 
2591 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2592 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2593 
2594 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2595 	amdgpu_ring_write(ring, 0x80000000);
2596 	amdgpu_ring_write(ring, 0x80000000);
2597 
2598 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2599 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2600 			if (sect->id == SECT_CONTEXT) {
2601 				amdgpu_ring_write(ring,
2602 				       PACKET3(PACKET3_SET_CONTEXT_REG,
2603 					       ext->reg_count));
2604 				amdgpu_ring_write(ring,
2605 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2606 				for (i = 0; i < ext->reg_count; i++)
2607 					amdgpu_ring_write(ring, ext->extent[i]);
2608 			}
2609 		}
2610 	}
2611 
2612 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2613 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2614 
2615 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2616 	amdgpu_ring_write(ring, 0);
2617 
2618 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2619 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2620 	amdgpu_ring_write(ring, 0x8000);
2621 	amdgpu_ring_write(ring, 0x8000);
2622 
2623 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2624 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2625 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2626 	amdgpu_ring_write(ring, tmp);
2627 	amdgpu_ring_write(ring, 0);
2628 
2629 	amdgpu_ring_commit(ring);
2630 
2631 	return 0;
2632 }
2633 
2634 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2635 {
2636 	struct amdgpu_ring *ring;
2637 	u32 tmp;
2638 	u32 rb_bufsz;
2639 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
2640 
2641 	/* Set the write pointer delay */
2642 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2643 
2644 	/* set the RB to use vmid 0 */
2645 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2646 
2647 	/* Set ring buffer size */
2648 	ring = &adev->gfx.gfx_ring[0];
2649 	rb_bufsz = order_base_2(ring->ring_size / 8);
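	/*
	 * RB_BUFSZ is the log2 of the ring size in 8-byte units (the HW
	 * sees 2^(RB_BUFSZ + 1) dwords); e.g. a 4KB ring gives
	 * order_base_2(4096 / 8) = 9.
	 */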
2650 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2651 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2652 #ifdef __BIG_ENDIAN
2653 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2654 #endif
2655 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2656 
2657 	/* Initialize the ring buffer's write pointers */
2658 	ring->wptr = 0;
2659 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2660 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2661 
2662 	/* set the wb address whether it's enabled or not */
2663 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2664 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2665 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2666 
2667 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2668 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2669 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2670 
2671 	mdelay(1);
2672 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2673 
2674 	rb_addr = ring->gpu_addr >> 8;
2675 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2676 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2677 
2678 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2679 	if (ring->use_doorbell) {
2680 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2681 				    DOORBELL_OFFSET, ring->doorbell_index);
2682 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2683 				    DOORBELL_EN, 1);
2684 	} else {
2685 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2686 	}
2687 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2688 
2689 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2690 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
2691 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2692 
2693 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2694 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2695 
2696 
2697 	/* start the ring */
2698 	gfx_v9_0_cp_gfx_start(adev);
2699 	ring->sched.ready = true;
2700 
2701 	return 0;
2702 }
2703 
2704 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2705 {
2706 	int i;
2707 
2708 	if (enable) {
2709 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2710 	} else {
2711 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2712 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2713 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2714 			adev->gfx.compute_ring[i].sched.ready = false;
2715 		adev->gfx.kiq.ring.sched.ready = false;
2716 	}
2717 	udelay(50);
2718 }
2719 
2720 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2721 {
2722 	const struct gfx_firmware_header_v1_0 *mec_hdr;
2723 	const __le32 *fw_data;
2724 	unsigned i;
2725 	u32 tmp;
2726 
2727 	if (!adev->gfx.mec_fw)
2728 		return -EINVAL;
2729 
2730 	gfx_v9_0_cp_compute_enable(adev, false);
2731 
2732 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2733 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2734 
2735 	fw_data = (const __le32 *)
2736 		(adev->gfx.mec_fw->data +
2737 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2738 	tmp = 0;
2739 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2740 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2741 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2742 
2743 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2744 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2745 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2746 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2747 
2748 	/* MEC1 */
2749 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2750 			 mec_hdr->jt_offset);
2751 	for (i = 0; i < mec_hdr->jt_size; i++)
2752 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2753 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2754 
2755 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2756 			adev->gfx.mec_fw_version);
2757 	/* TODO: loading MEC2 firmware is only necessary if MEC2 is to run different microcode than MEC1. */
2758 
2759 	return 0;
2760 }
2761 
2762 /* KIQ functions */
2763 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2764 {
2765 	uint32_t tmp;
2766 	struct amdgpu_device *adev = ring->adev;
2767 
2768 	/* tell the RLC which queue is the KIQ */
2769 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2770 	tmp &= 0xffffff00;
2771 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2772 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2773 	tmp |= 0x80;
2774 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2775 }
2776 
2777 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2778 {
2779 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2780 	uint64_t queue_mask = 0;
2781 	int r, i;
2782 
2783 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2784 		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2785 			continue;
2786 
2787 		/* This situation may be hit in the future if a new HW
2788 		 * generation exposes more than 64 queues. If so, the
2789 		 * definition of queue_mask needs updating */
2790 		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2791 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2792 			break;
2793 		}
2794 
2795 		queue_mask |= (1ull << i);
2796 	}
2797 
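	/*
	 * Ring space accounting for the submission below: SET_RESOURCES is
	 * 8 dwords (header + 7 payload) and each MAP_QUEUES is 7 dwords
	 * (header + 6 payload), hence 7 * num_compute_rings + 8.
	 */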
2798 	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
2799 	if (r) {
2800 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2801 		return r;
2802 	}
2803 
2804 	/* set resources */
2805 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2806 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2807 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
2808 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
2809 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
2810 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
2811 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
2812 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
2813 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
2814 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2815 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2816 		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2817 		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2818 
2819 		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2821 		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2822 				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2823 				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2824 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2825 				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2826 				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2827 				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2828 				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2829 				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2830 				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2831 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2832 		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2833 		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2834 		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2835 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2836 	}
2837 
2838 	r = amdgpu_ring_test_helper(kiq_ring);
2839 	if (r)
2840 		DRM_ERROR("KCQ enable failed\n");
2841 
2842 	return r;
2843 }
2844 
2845 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2846 {
2847 	struct amdgpu_device *adev = ring->adev;
2848 	struct v9_mqd *mqd = ring->mqd_ptr;
2849 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2850 	uint32_t tmp;
2851 
2852 	mqd->header = 0xC0310800;
2853 	mqd->compute_pipelinestat_enable = 0x00000001;
2854 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2855 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2856 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2857 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2858 	mqd->compute_misc_reserved = 0x00000003;
2859 
2860 	mqd->dynamic_cu_mask_addr_lo =
2861 		lower_32_bits(ring->mqd_gpu_addr
2862 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2863 	mqd->dynamic_cu_mask_addr_hi =
2864 		upper_32_bits(ring->mqd_gpu_addr
2865 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2866 
2867 	eop_base_addr = ring->eop_gpu_addr >> 8;
2868 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2869 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2870 
2871 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2872 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2873 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2874 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
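	/*
	 * Worked example: GFX9_MEC_HPD_SIZE is 4096 bytes = 1024 dwords,
	 * so EOP_SIZE becomes order_base_2(1024) - 1 = 9 and the HW
	 * decodes 2^(9 + 1) = 1024 dwords.
	 */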
2875 
2876 	mqd->cp_hqd_eop_control = tmp;
2877 
2878 	/* enable doorbell? */
2879 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2880 
2881 	if (ring->use_doorbell) {
2882 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2883 				    DOORBELL_OFFSET, ring->doorbell_index);
2884 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2885 				    DOORBELL_EN, 1);
2886 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2887 				    DOORBELL_SOURCE, 0);
2888 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2889 				    DOORBELL_HIT, 0);
2890 	} else {
2891 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2892 					 DOORBELL_EN, 0);
2893 	}
2894 
2895 	mqd->cp_hqd_pq_doorbell_control = tmp;
2896 
2897 	/* disable the queue if it's active */
2898 	ring->wptr = 0;
2899 	mqd->cp_hqd_dequeue_request = 0;
2900 	mqd->cp_hqd_pq_rptr = 0;
2901 	mqd->cp_hqd_pq_wptr_lo = 0;
2902 	mqd->cp_hqd_pq_wptr_hi = 0;
2903 
2904 	/* set the pointer to the MQD */
2905 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2906 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2907 
2908 	/* set MQD vmid to 0 */
2909 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2910 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2911 	mqd->cp_mqd_control = tmp;
2912 
2913 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2914 	hqd_gpu_addr = ring->gpu_addr >> 8;
2915 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2916 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2917 
2918 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2919 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2920 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2921 			    (order_base_2(ring->ring_size / 4) - 1));
2922 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2923 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2924 #ifdef __BIG_ENDIAN
2925 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2926 #endif
2927 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2928 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2929 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2930 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2931 	mqd->cp_hqd_pq_control = tmp;
2932 
2933 	/* set the wb address whether it's enabled or not */
2934 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2935 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2936 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2937 		upper_32_bits(wb_gpu_addr) & 0xffff;
2938 
2939 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2940 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2941 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2942 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2943 
2944 	tmp = 0;
2945 	/* enable the doorbell if requested */
2946 	if (ring->use_doorbell) {
2947 		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2948 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2949 				DOORBELL_OFFSET, ring->doorbell_index);
2950 
2951 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2952 					 DOORBELL_EN, 1);
2953 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2954 					 DOORBELL_SOURCE, 0);
2955 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2956 					 DOORBELL_HIT, 0);
2957 	}
2958 
2959 	mqd->cp_hqd_pq_doorbell_control = tmp;
2960 
2961 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2962 	ring->wptr = 0;
2963 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2964 
2965 	/* set the vmid for the queue */
2966 	mqd->cp_hqd_vmid = 0;
2967 
2968 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2969 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2970 	mqd->cp_hqd_persistent_state = tmp;
2971 
2972 	/* set MIN_IB_AVAIL_SIZE */
2973 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2974 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2975 	mqd->cp_hqd_ib_control = tmp;
2976 
2977 	/* activate the queue */
2978 	mqd->cp_hqd_active = 1;
2979 
2980 	return 0;
2981 }
2982 
2983 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2984 {
2985 	struct amdgpu_device *adev = ring->adev;
2986 	struct v9_mqd *mqd = ring->mqd_ptr;
2987 	int j;
2988 
2989 	/* disable wptr polling */
2990 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2991 
2992 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2993 	       mqd->cp_hqd_eop_base_addr_lo);
2994 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2995 	       mqd->cp_hqd_eop_base_addr_hi);
2996 
2997 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2998 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2999 	       mqd->cp_hqd_eop_control);
3000 
3001 	/* program the doorbell control from the MQD */
3002 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3003 	       mqd->cp_hqd_pq_doorbell_control);
3004 
3005 	/* disable the queue if it's active */
3006 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3007 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3008 		for (j = 0; j < adev->usec_timeout; j++) {
3009 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3010 				break;
3011 			udelay(1);
3012 		}
3013 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3014 		       mqd->cp_hqd_dequeue_request);
3015 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3016 		       mqd->cp_hqd_pq_rptr);
3017 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3018 		       mqd->cp_hqd_pq_wptr_lo);
3019 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3020 		       mqd->cp_hqd_pq_wptr_hi);
3021 	}
3022 
3023 	/* set the pointer to the MQD */
3024 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3025 	       mqd->cp_mqd_base_addr_lo);
3026 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3027 	       mqd->cp_mqd_base_addr_hi);
3028 
3029 	/* set MQD vmid to 0 */
3030 	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3031 	       mqd->cp_mqd_control);
3032 
3033 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3034 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3035 	       mqd->cp_hqd_pq_base_lo);
3036 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3037 	       mqd->cp_hqd_pq_base_hi);
3038 
3039 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3040 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3041 	       mqd->cp_hqd_pq_control);
3042 
3043 	/* set the wb address whether it's enabled or not */
3044 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3045 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3046 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3047 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3048 
3049 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3050 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3051 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3052 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3053 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3054 
3055 	/* enable the doorbell if requested */
3056 	if (ring->use_doorbell) {
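		/*
		 * The doorbell range registers take byte offsets: each 64-bit
		 * doorbell is two dwords, hence the "* 2" for the dword index
		 * and the "<< 2" to convert dwords to bytes.
		 */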
3057 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3058 					(adev->doorbell_index.kiq * 2) << 2);
3059 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3060 					(adev->doorbell_index.userqueue_end * 2) << 2);
3061 	}
3062 
3063 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3064 	       mqd->cp_hqd_pq_doorbell_control);
3065 
3066 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3067 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3068 	       mqd->cp_hqd_pq_wptr_lo);
3069 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3070 	       mqd->cp_hqd_pq_wptr_hi);
3071 
3072 	/* set the vmid for the queue */
3073 	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3074 
3075 	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3076 	       mqd->cp_hqd_persistent_state);
3077 
3078 	/* activate the queue */
3079 	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3080 	       mqd->cp_hqd_active);
3081 
3082 	if (ring->use_doorbell)
3083 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3084 
3085 	return 0;
3086 }
3087 
3088 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3089 {
3090 	struct amdgpu_device *adev = ring->adev;
3091 	int j;
3092 
3093 	/* disable the queue if it's active */
3094 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3095 
3096 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
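		/* a dequeue request of 1 is assumed to ask the CP to drain the
		 * queue; poll ACTIVE below until the HQD deasserts it
		 */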
3097 
3098 		for (j = 0; j < adev->usec_timeout; j++) {
3099 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3100 				break;
3101 			udelay(1);
3102 		}
3103 
3104 		if (j == adev->usec_timeout) {
3105 			DRM_DEBUG("KIQ dequeue request failed.\n");
3106 
3107 			/* Manual disable if dequeue request times out */
3108 			WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3109 		}
3110 
3111 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3112 		      0);
3113 	}
3114 
3115 	WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3116 	WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3117 	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
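	/* 0x40000000 is presumably the DOORBELL_EN bit; pulse it before
	 * clearing the whole doorbell control register below
	 */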
3118 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3119 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3120 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3121 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3122 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3123 
3124 	return 0;
3125 }
3126 
3127 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3128 {
3129 	struct amdgpu_device *adev = ring->adev;
3130 	struct v9_mqd *mqd = ring->mqd_ptr;
3131 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3132 
3133 	gfx_v9_0_kiq_setting(ring);
3134 
3135 	if (adev->in_gpu_reset) { /* for GPU_RESET case */
3136 		/* reset MQD to a clean status */
3137 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3138 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3139 
3140 		/* reset ring buffer */
3141 		ring->wptr = 0;
3142 		amdgpu_ring_clear_ring(ring);
3143 
3144 		mutex_lock(&adev->srbm_mutex);
3145 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3146 		gfx_v9_0_kiq_init_register(ring);
3147 		soc15_grbm_select(adev, 0, 0, 0, 0);
3148 		mutex_unlock(&adev->srbm_mutex);
3149 	} else {
3150 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3151 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3152 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3153 		mutex_lock(&adev->srbm_mutex);
3154 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3155 		gfx_v9_0_mqd_init(ring);
3156 		gfx_v9_0_kiq_init_register(ring);
3157 		soc15_grbm_select(adev, 0, 0, 0, 0);
3158 		mutex_unlock(&adev->srbm_mutex);
3159 
3160 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3161 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3162 	}
3163 
3164 	return 0;
3165 }
3166 
3167 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3168 {
3169 	struct amdgpu_device *adev = ring->adev;
3170 	struct v9_mqd *mqd = ring->mqd_ptr;
3171 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3172 
3173 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3174 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3175 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3176 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3177 		mutex_lock(&adev->srbm_mutex);
3178 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3179 		gfx_v9_0_mqd_init(ring);
3180 		soc15_grbm_select(adev, 0, 0, 0, 0);
3181 		mutex_unlock(&adev->srbm_mutex);
3182 
3183 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3184 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3185 	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3186 		/* reset MQD to a clean status */
3187 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3188 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3189 
3190 		/* reset ring buffer */
3191 		ring->wptr = 0;
3192 		amdgpu_ring_clear_ring(ring);
3193 	} else {
3194 		amdgpu_ring_clear_ring(ring);
3195 	}
3196 
3197 	return 0;
3198 }
3199 
3200 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3201 {
3202 	struct amdgpu_ring *ring;
3203 	int r;
3204 
3205 	ring = &adev->gfx.kiq.ring;
3206 
3207 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3208 	if (unlikely(r != 0))
3209 		return r;
3210 
3211 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3212 	if (unlikely(r != 0)) {
3213 		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}
3214 
3215 	gfx_v9_0_kiq_init_queue(ring);
3216 	amdgpu_bo_kunmap(ring->mqd_obj);
3217 	ring->mqd_ptr = NULL;
3218 	amdgpu_bo_unreserve(ring->mqd_obj);
3219 	ring->sched.ready = true;
3220 	return 0;
3221 }
3222 
3223 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3224 {
3225 	struct amdgpu_ring *ring = NULL;
3226 	int r = 0, i;
3227 
3228 	gfx_v9_0_cp_compute_enable(adev, true);
3229 
3230 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3231 		ring = &adev->gfx.compute_ring[i];
3232 
3233 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3234 		if (unlikely(r != 0))
3235 			goto done;
3236 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3237 		if (!r) {
3238 			r = gfx_v9_0_kcq_init_queue(ring);
3239 			amdgpu_bo_kunmap(ring->mqd_obj);
3240 			ring->mqd_ptr = NULL;
3241 		}
3242 		amdgpu_bo_unreserve(ring->mqd_obj);
3243 		if (r)
3244 			goto done;
3245 	}
3246 
3247 	r = gfx_v9_0_kiq_kcq_enable(adev);
3248 done:
3249 	return r;
3250 }
3251 
3252 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3253 {
3254 	int r, i;
3255 	struct amdgpu_ring *ring;
3256 
3257 	if (!(adev->flags & AMD_IS_APU))
3258 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3259 
3260 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3261 		/* legacy firmware loading */
3262 		r = gfx_v9_0_cp_gfx_load_microcode(adev);
3263 		if (r)
3264 			return r;
3265 
3266 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3267 		if (r)
3268 			return r;
3269 	}
3270 
3271 	r = gfx_v9_0_kiq_resume(adev);
3272 	if (r)
3273 		return r;
3274 
3275 	r = gfx_v9_0_cp_gfx_resume(adev);
3276 	if (r)
3277 		return r;
3278 
3279 	r = gfx_v9_0_kcq_resume(adev);
3280 	if (r)
3281 		return r;
3282 
3283 	ring = &adev->gfx.gfx_ring[0];
3284 	r = amdgpu_ring_test_helper(ring);
3285 	if (r)
3286 		return r;
3287 
3288 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3289 		ring = &adev->gfx.compute_ring[i];
3290 		amdgpu_ring_test_helper(ring);
3291 	}
3292 
3293 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3294 
3295 	return 0;
3296 }
3297 
3298 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3299 {
3300 	gfx_v9_0_cp_gfx_enable(adev, enable);
3301 	gfx_v9_0_cp_compute_enable(adev, enable);
3302 }
3303 
3304 static int gfx_v9_0_hw_init(void *handle)
3305 {
3306 	int r;
3307 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3308 
3309 	gfx_v9_0_init_golden_registers(adev);
3310 
3311 	gfx_v9_0_constants_init(adev);
3312 
3313 	r = gfx_v9_0_csb_vram_pin(adev);
3314 	if (r)
3315 		return r;
3316 
3317 	r = adev->gfx.rlc.funcs->resume(adev);
3318 	if (r)
3319 		return r;
3320 
3321 	r = gfx_v9_0_cp_resume(adev);
3322 	if (r)
3323 		return r;
3324 
3325 	r = gfx_v9_0_ngg_en(adev);
3326 	if (r)
3327 		return r;
3328 
3329 	return 0;
3330 }
3331 
3332 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3333 {
3334 	int r, i;
3335 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3336 
3337 	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3338 	if (r)
3339 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3340 
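	/* each UNMAP_QUEUES packet below is 6 dwords (1 header + 5 payload),
	 * matching the 6 * num_compute_rings allocation above
	 */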
3341 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3342 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3343 
3344 		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3345 		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3346 						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3347 						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3348 						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3349 						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3350 		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3351 		amdgpu_ring_write(kiq_ring, 0);
3352 		amdgpu_ring_write(kiq_ring, 0);
3353 		amdgpu_ring_write(kiq_ring, 0);
3354 	}
3355 	r = amdgpu_ring_test_helper(kiq_ring);
3356 	if (r)
3357 		DRM_ERROR("KCQ disable failed\n");
3358 
3359 	return r;
3360 }
3361 
3362 static int gfx_v9_0_hw_fini(void *handle)
3363 {
3364 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3365 
3366 	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3367 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3368 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3369 
3370 	/* disable KCQ so the CPC stops touching memory that is about to become invalid */
3371 	gfx_v9_0_kcq_disable(adev);
3372 
3373 	if (amdgpu_sriov_vf(adev)) {
3374 		gfx_v9_0_cp_gfx_enable(adev, false);
3375 		/* Polling must be disabled for SRIOV once the hw is finished,
3376 		 * otherwise the CPC engine keeps fetching the WB address, which
3377 		 * is no longer valid after the sw side has finished, and this
3378 		 * triggers DMAR read errors on the hypervisor side.
3379 		 */
3380 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3381 		return 0;
3382 	}
3383 
3384 	/* Use the deinitialization sequence from CAIL when unbinding the device
3385 	 * from the driver, otherwise the KIQ hangs when binding it back.
3386 	 */
3387 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3388 		mutex_lock(&adev->srbm_mutex);
3389 		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3390 				adev->gfx.kiq.ring.pipe,
3391 				adev->gfx.kiq.ring.queue, 0);
3392 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3393 		soc15_grbm_select(adev, 0, 0, 0, 0);
3394 		mutex_unlock(&adev->srbm_mutex);
3395 	}
3396 
3397 	gfx_v9_0_cp_enable(adev, false);
3398 	adev->gfx.rlc.funcs->stop(adev);
3399 
3400 	gfx_v9_0_csb_vram_unpin(adev);
3401 
3402 	return 0;
3403 }
3404 
3405 static int gfx_v9_0_suspend(void *handle)
3406 {
3407 	return gfx_v9_0_hw_fini(handle);
3408 }
3409 
3410 static int gfx_v9_0_resume(void *handle)
3411 {
3412 	return gfx_v9_0_hw_init(handle);
3413 }
3414 
3415 static bool gfx_v9_0_is_idle(void *handle)
3416 {
3417 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3418 
3419 	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3420 			      GRBM_STATUS, GUI_ACTIVE);
3424 }
3425 
3426 static int gfx_v9_0_wait_for_idle(void *handle)
3427 {
3428 	unsigned i;
3429 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3430 
3431 	for (i = 0; i < adev->usec_timeout; i++) {
3432 		if (gfx_v9_0_is_idle(handle))
3433 			return 0;
3434 		udelay(1);
3435 	}
3436 	return -ETIMEDOUT;
3437 }
3438 
3439 static int gfx_v9_0_soft_reset(void *handle)
3440 {
3441 	u32 grbm_soft_reset = 0;
3442 	u32 tmp;
3443 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3444 
3445 	/* GRBM_STATUS */
3446 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3447 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3448 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3449 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3450 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3451 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3452 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3453 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3454 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3455 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3456 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3457 	}
3458 
3459 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3460 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3461 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3462 	}
3463 
3464 	/* GRBM_STATUS2 */
3465 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3466 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3467 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3468 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3469 
3470 
3471 	if (grbm_soft_reset) {
3472 		/* stop the rlc */
3473 		adev->gfx.rlc.funcs->stop(adev);
3474 
3475 		/* Disable GFX parsing/prefetching */
3476 		gfx_v9_0_cp_gfx_enable(adev, false);
3477 
3478 		/* Disable MEC parsing/prefetching */
3479 		gfx_v9_0_cp_compute_enable(adev, false);
3480 
3482 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3483 		tmp |= grbm_soft_reset;
3484 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3485 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3486 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3487 
3488 		udelay(50);
3489 
3490 		tmp &= ~grbm_soft_reset;
3491 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3492 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3494 
3495 		/* Wait a little for things to settle down */
3496 		udelay(50);
3497 	}
3498 	return 0;
3499 }
3500 
3501 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3502 {
3503 	uint64_t clock;
3504 
3505 	mutex_lock(&adev->gfx.gpu_clock_mutex);
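	/* writing the capture register is assumed to latch the free-running
	 * counter into the LSB/MSB pair; the mutex keeps the two reads paired
	 */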
3506 	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3507 	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3508 		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3509 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
3510 	return clock;
3511 }
3512 
3513 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3514 					  uint32_t vmid,
3515 					  uint32_t gds_base, uint32_t gds_size,
3516 					  uint32_t gws_base, uint32_t gws_size,
3517 					  uint32_t oa_base, uint32_t oa_size)
3518 {
3519 	struct amdgpu_device *adev = ring->adev;
3520 
3521 	/* GDS Base */
3522 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3523 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3524 				   gds_base);
3525 
3526 	/* GDS Size */
3527 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3528 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3529 				   gds_size);
3530 
3531 	/* GWS */
3532 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3533 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3534 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3535 
3536 	/* OA */
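	/* (1 << (oa_size + oa_base)) - (1 << oa_base) sets oa_size consecutive
	 * bits starting at bit oa_base, i.e. this VMID's OA allocation mask
	 */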
3537 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3538 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3539 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
3540 }
3541 
3542 static int gfx_v9_0_early_init(void *handle)
3543 {
3544 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3545 
3546 	adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3547 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3548 	gfx_v9_0_set_ring_funcs(adev);
3549 	gfx_v9_0_set_irq_funcs(adev);
3550 	gfx_v9_0_set_gds_init(adev);
3551 	gfx_v9_0_set_rlc_funcs(adev);
3552 
3553 	return 0;
3554 }
3555 
3556 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
3557 		struct amdgpu_iv_entry *entry);
3558 
3559 static int gfx_v9_0_ecc_late_init(void *handle)
3560 {
3561 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3562 	struct ras_common_if **ras_if = &adev->gfx.ras_if;
3563 	struct ras_ih_if ih_info = {
3564 		.cb = gfx_v9_0_process_ras_data_cb,
3565 	};
3566 	struct ras_fs_if fs_info = {
3567 		.sysfs_name = "gfx_err_count",
3568 		.debugfs_name = "gfx_err_inject",
3569 	};
3570 	struct ras_common_if ras_block = {
3571 		.block = AMDGPU_RAS_BLOCK__GFX,
3572 		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
3573 		.sub_block_index = 0,
3574 		.name = "gfx",
3575 	};
3576 	int r;
3577 
3578 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
3579 		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
3580 		return 0;
3581 	}
3582 
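	/* the RAS context is created once and survives suspend/reset; if it
	 * already exists, only the ECC interrupt needs to be re-armed
	 */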
3583 	if (*ras_if)
3584 		goto resume;
3585 
3586 	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
3587 	if (!*ras_if)
3588 		return -ENOMEM;
3589 
3590 	**ras_if = ras_block;
3591 
3592 	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
3593 	if (r)
3594 		goto feature;
3595 
3596 	ih_info.head = **ras_if;
3597 	fs_info.head = **ras_if;
3598 
3599 	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
3600 	if (r)
3601 		goto interrupt;
3602 
3603 	r = amdgpu_ras_debugfs_create(adev, &fs_info);
3604 	if (r)
3605 		goto debugfs;
3606 
3607 	r = amdgpu_ras_sysfs_create(adev, &fs_info);
3608 	if (r)
3609 		goto sysfs;
3610 resume:
3611 	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
3612 	if (r)
3613 		goto irq;
3614 
3615 	return 0;
3616 irq:
3617 	amdgpu_ras_sysfs_remove(adev, *ras_if);
3618 sysfs:
3619 	amdgpu_ras_debugfs_remove(adev, *ras_if);
3620 debugfs:
3621 	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
3622 interrupt:
3623 	amdgpu_ras_feature_enable(adev, *ras_if, 0);
3624 feature:
3625 	kfree(*ras_if);
3626 	*ras_if = NULL;
3627 	return r;
3628 }
3629 
3630 static int gfx_v9_0_late_init(void *handle)
3631 {
3632 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3633 	int r;
3634 
3635 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3636 	if (r)
3637 		return r;
3638 
3639 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3640 	if (r)
3641 		return r;
3642 
3643 	r = gfx_v9_0_ecc_late_init(handle);
3644 	if (r)
3645 		return r;
3646 
3647 	return 0;
3648 }
3649 
3650 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
3651 {
3652 	uint32_t rlc_setting;
3653 
3654 	/* if RLC is not enabled, do nothing */
3655 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3656 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3657 		return false;
3658 
3659 	return true;
3660 }
3661 
3662 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
3663 {
3664 	uint32_t data;
3665 	unsigned i;
3666 
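	/* MESSAGE=1 requests safe-mode entry; the RLC is assumed to clear the
	 * CMD bit once the request has been acknowledged, hence the poll below
	 */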
3667 	data = RLC_SAFE_MODE__CMD_MASK;
3668 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3669 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3670 
3671 	/* wait for RLC_SAFE_MODE */
3672 	for (i = 0; i < adev->usec_timeout; i++) {
3673 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3674 			break;
3675 		udelay(1);
3676 	}
3677 }
3678 
3679 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
3680 {
3681 	uint32_t data;
3682 
3683 	data = RLC_SAFE_MODE__CMD_MASK;
3684 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3685 }
3686 
3687 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3688 						bool enable)
3689 {
3690 	amdgpu_gfx_rlc_enter_safe_mode(adev);
3691 
3692 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3693 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3694 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3695 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3696 	} else {
3697 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3698 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3699 	}
3700 
3701 	amdgpu_gfx_rlc_exit_safe_mode(adev);
3702 }
3703 
3704 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3705 						bool enable)
3706 {
3707 	/* TODO: double check if we need to perform under safe mode */
3708 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
3709 
3710 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3711 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3712 	else
3713 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3714 
3715 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3716 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3717 	else
3718 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3719 
3720 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
3721 }
3722 
3723 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3724 						      bool enable)
3725 {
3726 	uint32_t data, def;
3727 
3728 	amdgpu_gfx_rlc_enter_safe_mode(adev);
3729 
3730 	/* It is disabled by HW by default */
3731 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3732 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3733 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3734 
3735 		if (adev->asic_type != CHIP_VEGA12)
3736 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3737 
3738 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3739 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3740 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3741 
3742 		/* only for Vega10 & Raven1 */
3743 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3744 
3745 		if (def != data)
3746 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3747 
3748 		/* MGLS is a global flag to control all MGLS in GFX */
3749 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3750 			/* 2 - RLC memory Light sleep */
3751 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3752 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3753 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3754 				if (def != data)
3755 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3756 			}
3757 			/* 3 - CP memory Light sleep */
3758 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3759 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3760 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3761 				if (def != data)
3762 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3763 			}
3764 		}
3765 	} else {
3766 		/* 1 - MGCG_OVERRIDE */
3767 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3768 
3769 		if (adev->asic_type != CHIP_VEGA12)
3770 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3771 
3772 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3773 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3774 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3775 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3776 
3777 		if (def != data)
3778 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3779 
3780 		/* 2 - disable MGLS in RLC */
3781 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3782 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3783 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3784 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3785 		}
3786 
3787 		/* 3 - disable MGLS in CP */
3788 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3789 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3790 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3791 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3792 		}
3793 	}
3794 
3795 	amdgpu_gfx_rlc_exit_safe_mode(adev);
3796 }
3797 
3798 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3799 					   bool enable)
3800 {
3801 	uint32_t data, def;
3802 
3803 	amdgpu_gfx_rlc_enter_safe_mode(adev);
3804 
3805 	/* Enable 3D CGCG/CGLS */
3806 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3807 		/* write cmd to clear cgcg/cgls ov */
3808 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3809 		/* unset CGCG override */
3810 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3811 		/* update CGCG and CGLS override bits */
3812 		if (def != data)
3813 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3814 
3815 		/* enable 3Dcgcg FSM(0x0000363f) */
3816 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3817 
3818 		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3819 			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3820 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3821 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3822 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3823 		if (def != data)
3824 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3825 
3826 		/* set IDLE_POLL_COUNT(0x00900100) */
3827 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3828 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3829 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3830 		if (def != data)
3831 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3832 	} else {
3833 		/* Disable CGCG/CGLS */
3834 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3835 		/* disable cgcg, cgls should be disabled */
3836 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3837 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3838 		/* disable cgcg and cgls in FSM */
3839 		if (def != data)
3840 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3841 	}
3842 
3843 	amdgpu_gfx_rlc_exit_safe_mode(adev);
3844 }
3845 
3846 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3847 						      bool enable)
3848 {
3849 	uint32_t def, data;
3850 
3851 	amdgpu_gfx_rlc_enter_safe_mode(adev);
3852 
3853 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3854 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3855 		/* unset CGCG override */
3856 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3857 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3858 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3859 		else
3860 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3861 		/* update CGCG and CGLS override bits */
3862 		if (def != data)
3863 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3864 
3865 		/* enable cgcg FSM(0x0000363F) */
3866 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3867 
3868 		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3869 			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3870 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3871 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3872 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3873 		if (def != data)
3874 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3875 
3876 		/* set IDLE_POLL_COUNT(0x00900100) */
3877 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3878 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3879 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3880 		if (def != data)
3881 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3882 	} else {
3883 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3884 		/* reset CGCG/CGLS bits */
3885 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3886 		/* disable cgcg and cgls in FSM */
3887 		if (def != data)
3888 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3889 	}
3890 
3891 	amdgpu_gfx_rlc_exit_safe_mode(adev);
3892 }
3893 
3894 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3895 					    bool enable)
3896 {
3897 	if (enable) {
3898 		/* CGCG/CGLS should be enabled after MGCG/MGLS
3899 		 * ===  MGCG + MGLS ===
3900 		 */
3901 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3902 		/* ===  CGCG /CGLS for GFX 3D Only === */
3903 		gfx_v9_0_update_3d_clock_gating(adev, enable);
3904 		/* ===  CGCG + CGLS === */
3905 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3906 	} else {
3907 		/* CGCG/CGLS should be disabled before MGCG/MGLS
3908 		 * ===  CGCG + CGLS ===
3909 		 */
3910 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3911 		/* ===  CGCG /CGLS for GFX 3D Only === */
3912 		gfx_v9_0_update_3d_clock_gating(adev, enable);
3913 		/* ===  MGCG + MGLS === */
3914 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3915 	}
3916 	return 0;
3917 }
3918 
3919 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3920 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
3921 	.set_safe_mode = gfx_v9_0_set_safe_mode,
3922 	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
3923 	.init = gfx_v9_0_rlc_init,
3924 	.get_csb_size = gfx_v9_0_get_csb_size,
3925 	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
3926 	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
3927 	.resume = gfx_v9_0_rlc_resume,
3928 	.stop = gfx_v9_0_rlc_stop,
3929 	.reset = gfx_v9_0_rlc_reset,
3930 	.start = gfx_v9_0_rlc_start
3931 };
3932 
3933 static int gfx_v9_0_set_powergating_state(void *handle,
3934 					  enum amd_powergating_state state)
3935 {
3936 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3937 	bool enable = (state == AMD_PG_STATE_GATE);
3938 
3939 	switch (adev->asic_type) {
3940 	case CHIP_RAVEN:
3941 		if (!enable) {
3942 			amdgpu_gfx_off_ctrl(adev, false);
3943 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3944 		}
3945 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3946 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3947 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3948 		} else {
3949 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3950 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3951 		}
3952 
3953 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3954 			gfx_v9_0_enable_cp_power_gating(adev, true);
3955 		else
3956 			gfx_v9_0_enable_cp_power_gating(adev, false);
3957 
3958 		/* update gfx cgpg state */
3959 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3960 
3961 		/* update mgcg state */
3962 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3963 
3964 		if (enable)
3965 			amdgpu_gfx_off_ctrl(adev, true);
3966 		break;
3967 	case CHIP_VEGA12:
3968 		if (!enable) {
3969 			amdgpu_gfx_off_ctrl(adev, false);
3970 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3971 		} else {
3972 			amdgpu_gfx_off_ctrl(adev, true);
3973 		}
3974 		break;
3975 	default:
3976 		break;
3977 	}
3978 
3979 	return 0;
3980 }
3981 
3982 static int gfx_v9_0_set_clockgating_state(void *handle,
3983 					  enum amd_clockgating_state state)
3984 {
3985 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3986 
3987 	if (amdgpu_sriov_vf(adev))
3988 		return 0;
3989 
3990 	switch (adev->asic_type) {
3991 	case CHIP_VEGA10:
3992 	case CHIP_VEGA12:
3993 	case CHIP_VEGA20:
3994 	case CHIP_RAVEN:
3995 		gfx_v9_0_update_gfx_clock_gating(adev,
3996 						 state == AMD_CG_STATE_GATE);
3997 		break;
3998 	default:
3999 		break;
4000 	}
4001 	return 0;
4002 }
4003 
4004 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
4005 {
4006 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4007 	int data;
4008 
4009 	if (amdgpu_sriov_vf(adev))
4010 		*flags = 0;
4011 
4012 	/* AMD_CG_SUPPORT_GFX_MGCG */
4013 	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4014 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4015 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
4016 
4017 	/* AMD_CG_SUPPORT_GFX_CGCG */
4018 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4019 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4020 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
4021 
4022 	/* AMD_CG_SUPPORT_GFX_CGLS */
4023 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4024 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
4025 
4026 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
4027 	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4028 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4029 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4030 
4031 	/* AMD_CG_SUPPORT_GFX_CP_LS */
4032 	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4033 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4034 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4035 
4036 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
4037 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4038 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4039 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4040 
4041 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
4042 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4043 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4044 }
4045 
4046 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4047 {
4048 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
4049 }
4050 
4051 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4052 {
4053 	struct amdgpu_device *adev = ring->adev;
4054 	u64 wptr;
4055 
4056 	/* XXX check if swapping is necessary on BE */
4057 	if (ring->use_doorbell) {
4058 		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4059 	} else {
4060 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4061 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4062 	}
4063 
4064 	return wptr;
4065 }
4066 
4067 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4068 {
4069 	struct amdgpu_device *adev = ring->adev;
4070 
4071 	if (ring->use_doorbell) {
4072 		/* XXX check if swapping is necessary on BE */
4073 		atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4074 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4075 	} else {
4076 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4077 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4078 	}
4079 }
4080 
4081 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4082 {
4083 	struct amdgpu_device *adev = ring->adev;
4084 	u32 ref_and_mask, reg_mem_engine;
4085 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
4086 
4087 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4088 		switch (ring->me) {
4089 		case 1:
4090 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4091 			break;
4092 		case 2:
4093 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4094 			break;
4095 		default:
4096 			return;
4097 		}
4098 		reg_mem_engine = 0;
4099 	} else {
4100 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4101 		reg_mem_engine = 1; /* pfp */
4102 	}
4103 
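	/* writes the flush request bits to the NBIO req register, then polls
	 * the done register until the matching bits flip
	 */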
4104 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4105 			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
4106 			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
4107 			      ref_and_mask, ref_and_mask, 0x20);
4108 }
4109 
4110 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4111 					struct amdgpu_job *job,
4112 					struct amdgpu_ib *ib,
4113 					uint32_t flags)
4114 {
4115 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4116 	u32 header, control = 0;
4117 
4118 	if (ib->flags & AMDGPU_IB_FLAG_CE)
4119 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4120 	else
4121 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4122 
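	/* control word: IB size in dwords in the low bits, VMID in bits 27:24 */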
4123 	control |= ib->length_dw | (vmid << 24);
4124 
4125 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4126 		control |= INDIRECT_BUFFER_PRE_ENB(1);
4127 
4128 		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4129 			gfx_v9_0_ring_emit_de_meta(ring);
4130 	}
4131 
4132 	amdgpu_ring_write(ring, header);
4133 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4134 	amdgpu_ring_write(ring,
4135 #ifdef __BIG_ENDIAN
4136 		(2 << 0) |
4137 #endif
4138 		lower_32_bits(ib->gpu_addr));
4139 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4140 	amdgpu_ring_write(ring, control);
4141 }
4142 
4143 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4144 					  struct amdgpu_job *job,
4145 					  struct amdgpu_ib *ib,
4146 					  uint32_t flags)
4147 {
4148 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4149 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4150 
4151 	/* Currently there is a high probability of a wave ID mismatch
4152 	 * between ME and GDS, leading to a hw deadlock, because ME generates
4153 	 * different wave IDs than the GDS expects. This situation happens
4154 	 * randomly when at least 5 compute pipes use GDS ordered append.
4155 	 * The wave IDs generated by ME are also wrong after suspend/resume.
4156 	 * Those are probably bugs somewhere else in the kernel driver.
4157 	 *
4158 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4159 	 * GDS to 0 for this ring (me/pipe).
4160 	 */
4161 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4162 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4163 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4164 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4165 	}
4166 
4167 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4168 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4169 	amdgpu_ring_write(ring,
4170 #ifdef __BIG_ENDIAN
4171 				(2 << 0) |
4172 #endif
4173 				lower_32_bits(ib->gpu_addr));
4174 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4175 	amdgpu_ring_write(ring, control);
4176 }
4177 
4178 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4179 				     u64 seq, unsigned flags)
4180 {
4181 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4182 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4183 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
4184 
4185 	/* RELEASE_MEM - flush caches, send int */
4186 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4187 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
4188 					       EOP_TC_NC_ACTION_EN) :
4189 					      (EOP_TCL1_ACTION_EN |
4190 					       EOP_TC_ACTION_EN |
4191 					       EOP_TC_WB_ACTION_EN |
4192 					       EOP_TC_MD_ACTION_EN)) |
4193 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4194 				 EVENT_INDEX(5)));
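	/* DATA_SEL: 1 = write low 32 bits of seq, 2 = write all 64 bits;
	 * INT_SEL 2 is assumed to raise the interrupt once the write confirms
	 */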
4195 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4196 
4197 	/*
4198 	 * The address must be qword-aligned for a 64-bit write, and
4199 	 * dword-aligned when only the low 32 bits are sent (high dword discarded).
4200 	 */
4201 	if (write64bit)
4202 		BUG_ON(addr & 0x7);
4203 	else
4204 		BUG_ON(addr & 0x3);
4205 	amdgpu_ring_write(ring, lower_32_bits(addr));
4206 	amdgpu_ring_write(ring, upper_32_bits(addr));
4207 	amdgpu_ring_write(ring, lower_32_bits(seq));
4208 	amdgpu_ring_write(ring, upper_32_bits(seq));
4209 	amdgpu_ring_write(ring, 0);
4210 }
4211 
4212 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4213 {
4214 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4215 	uint32_t seq = ring->fence_drv.sync_seq;
4216 	uint64_t addr = ring->fence_drv.gpu_addr;
4217 
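	/* poll the fence address until the newest synced seq number is
	 * visible; on gfx rings the PFP waits, so later fetches are ordered
	 */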
4218 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
4219 			      lower_32_bits(addr), upper_32_bits(addr),
4220 			      seq, 0xffffffff, 4);
4221 }
4222 
4223 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4224 					unsigned vmid, uint64_t pd_addr)
4225 {
4226 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4227 
4228 	/* compute doesn't have PFP */
4229 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4230 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4231 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4232 		amdgpu_ring_write(ring, 0x0);
4233 	}
4234 }
4235 
4236 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4237 {
4238 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
4239 }
4240 
4241 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4242 {
4243 	u64 wptr;
4244 
4245 	/* XXX check if swapping is necessary on BE */
4246 	if (ring->use_doorbell)
4247 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4248 	else
4249 		BUG();
4250 	return wptr;
4251 }
4252 
4253 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4254 					   bool acquire)
4255 {
4256 	struct amdgpu_device *adev = ring->adev;
4257 	int pipe_num, tmp, reg;
4258 	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4259 
4260 	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4261 
4262 	/* first me only has 2 entries, GFX and HP3D */
4263 	if (ring->me > 0)
4264 		pipe_num -= 2;
4265 
4266 	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4267 	tmp = RREG32(reg);
4268 	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4269 	WREG32(reg, tmp);
4270 }
4271 
4272 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4273 					    struct amdgpu_ring *ring,
4274 					    bool acquire)
4275 {
4276 	int i, pipe;
4277 	bool reserve;
4278 	struct amdgpu_ring *iring;
4279 
4280 	mutex_lock(&adev->gfx.pipe_reserve_mutex);
4281 	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
4282 	if (acquire)
4283 		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4284 	else
4285 		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4286 
4287 	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4288 		/* Clear all reservations - everyone reacquires all resources */
4289 		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4290 			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4291 						       true);
4292 
4293 		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4294 			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4295 						       true);
4296 	} else {
4297 		/* Lower all pipes without a current reservation */
4298 		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4299 			iring = &adev->gfx.gfx_ring[i];
4300 			pipe = amdgpu_gfx_queue_to_bit(adev,
4301 						       iring->me,
4302 						       iring->pipe,
4303 						       0);
4304 			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4305 			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4306 		}
4307 
4308 		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4309 			iring = &adev->gfx.compute_ring[i];
4310 			pipe = amdgpu_gfx_queue_to_bit(adev,
4311 						       iring->me,
4312 						       iring->pipe,
4313 						       0);
4314 			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4315 			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4316 		}
4317 	}
4318 
4319 	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4320 }
4321 
4322 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4323 				      struct amdgpu_ring *ring,
4324 				      bool acquire)
4325 {
4326 	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4327 	uint32_t queue_priority = acquire ? 0xf : 0x0;
4328 
4329 	mutex_lock(&adev->srbm_mutex);
4330 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4331 
4332 	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4333 	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4334 
4335 	soc15_grbm_select(adev, 0, 0, 0, 0);
4336 	mutex_unlock(&adev->srbm_mutex);
4337 }
4338 
4339 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4340 					       enum drm_sched_priority priority)
4341 {
4342 	struct amdgpu_device *adev = ring->adev;
4343 	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4344 
4345 	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4346 		return;
4347 
4348 	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4349 	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
4350 }
4351 
4352 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4353 {
4354 	struct amdgpu_device *adev = ring->adev;
4355 
4356 	/* XXX check if swapping is necessary on BE */
4357 	if (ring->use_doorbell) {
4358 		atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4359 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4360 	} else {
4361 		BUG(); /* only DOORBELL method supported on gfx9 now */
4362 	}
4363 }
4364 
4365 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4366 					 u64 seq, unsigned int flags)
4367 {
4368 	struct amdgpu_device *adev = ring->adev;
4369 
4370 	/* we only allocate 32bit for each seq wb address */
4371 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4372 
4373 	/* write fence seq to the "addr" */
4374 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4375 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4376 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4377 	amdgpu_ring_write(ring, lower_32_bits(addr));
4378 	amdgpu_ring_write(ring, upper_32_bits(addr));
4379 	amdgpu_ring_write(ring, lower_32_bits(seq));
4380 
4381 	if (flags & AMDGPU_FENCE_FLAG_INT) {
4382 		/* set register to trigger INT */
4383 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4384 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4385 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4386 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4387 		amdgpu_ring_write(ring, 0);
4388 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4389 	}
4390 }
4391 
4392 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
4393 {
4394 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4395 	amdgpu_ring_write(ring, 0);
4396 }
4397 
4398 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
4399 {
4400 	struct v9_ce_ib_state ce_payload = {0};
4401 	uint64_t csa_addr;
4402 	int cnt;
4403 
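	/* WRITE_DATA carries 3 fixed dwords (control + addr lo/hi) plus the
	 * payload; the packet count field is that total minus one, i.e. +4-2
	 */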
4404 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4405 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4406 
4407 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4408 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4409 				 WRITE_DATA_DST_SEL(8) |
4410 				 WR_CONFIRM) |
4411 				 WRITE_DATA_CACHE_POLICY(0));
4412 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4413 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4414 	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
4415 }
4416 
4417 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
4418 {
4419 	struct v9_de_ib_state de_payload = {0};
4420 	uint64_t csa_addr, gds_addr;
4421 	int cnt;
4422 
4423 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4424 	gds_addr = csa_addr + 4096;
4425 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4426 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4427 
4428 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4429 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4430 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4431 				 WRITE_DATA_DST_SEL(8) |
4432 				 WR_CONFIRM) |
4433 				 WRITE_DATA_CACHE_POLICY(0));
4434 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4435 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4436 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
4437 }
4438 
4439 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4440 {
4441 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4442 	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
4443 }
4444 
4445 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4446 {
4447 	uint32_t dw2 = 0;
4448 
4449 	if (amdgpu_sriov_vf(ring->adev))
4450 		gfx_v9_0_ring_emit_ce_meta(ring);
4451 
4452 	gfx_v9_0_ring_emit_tmz(ring, true);
4453 
4454 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
4455 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4456 		/* set load_global_config & load_global_uconfig */
4457 		dw2 |= 0x8001;
4458 		/* set load_cs_sh_regs */
4459 		dw2 |= 0x01000000;
4460 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
4461 		dw2 |= 0x10002;
4462 
4463 		/* set load_ce_ram if preamble presented */
4464 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4465 			dw2 |= 0x10000000;
4466 	} else {
4467 		/* still set load_ce_ram the first time a preamble is presented,
4468 		 * even though no context switch happens.
4469 		 */
4470 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4471 			dw2 |= 0x10000000;
4472 	}
4473 
4474 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4475 	amdgpu_ring_write(ring, dw2);
4476 	amdgpu_ring_write(ring, 0);
4477 }
4478 
4479 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4480 {
4481 	unsigned ret;
4482 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4483 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4484 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4485 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4486 	ret = ring->wptr & ring->buf_mask;
4487 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4488 	return ret;
4489 }
4490 
4491 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4492 {
4493 	unsigned cur;
4494 	BUG_ON(offset > ring->buf_mask);
4495 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
4496 
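	/* patch the placeholder with the dword distance from the marker to the
	 * current wptr; the else branch accounts for a ring-buffer wrap
	 */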
4497 	cur = (ring->wptr & ring->buf_mask) - 1;
4498 	if (likely(cur > offset))
4499 		ring->ring[offset] = cur - offset;
4500 	else
4501 		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
4502 }
4503 
4504 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4505 {
4506 	struct amdgpu_device *adev = ring->adev;
4507 
4508 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4509 	amdgpu_ring_write(ring, 0 |	/* src: register*/
4510 				(5 << 8) |	/* dst: memory */
4511 				(1 << 20));	/* write confirm */
4512 	amdgpu_ring_write(ring, reg);
4513 	amdgpu_ring_write(ring, 0);
4514 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4515 				adev->virt.reg_val_offs * 4));
4516 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4517 				adev->virt.reg_val_offs * 4));
4518 }
4519 
4520 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4521 				    uint32_t val)
4522 {
4523 	uint32_t cmd = 0;
4524 
4525 	switch (ring->funcs->type) {
4526 	case AMDGPU_RING_TYPE_GFX:
4527 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4528 		break;
4529 	case AMDGPU_RING_TYPE_KIQ:
4530 		cmd = (1 << 16); /* no inc addr */
4531 		break;
4532 	default:
4533 		cmd = WR_CONFIRM;
4534 		break;
4535 	}
4536 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4537 	amdgpu_ring_write(ring, cmd);
4538 	amdgpu_ring_write(ring, reg);
4539 	amdgpu_ring_write(ring, 0);
4540 	amdgpu_ring_write(ring, val);
4541 }
4542 
4543 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4544 					uint32_t val, uint32_t mask)
4545 {
4546 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4547 }
4548 
4549 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4550 						  uint32_t reg0, uint32_t reg1,
4551 						  uint32_t ref, uint32_t mask)
4552 {
4553 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4554 	struct amdgpu_device *adev = ring->adev;
4555 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
4556 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
4557 
4558 	if (fw_version_ok)
4559 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4560 				      ref, mask, 0x20);
4561 	else
4562 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4563 							   ref, mask);
4564 }
4565 
4566 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4567 {
4568 	struct amdgpu_device *adev = ring->adev;
4569 	uint32_t value = 0;
4570 
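	/* CMD 0x03 is assumed to be a wave-kill request, broadcast (MODE 0x01)
	 * but restricted to waves whose VMID matches the hung queue's
	 */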
4571 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4572 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4573 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4574 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4575 	WREG32(mmSQ_CMD, value);
4576 }
4577 
4578 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4579 						 enum amdgpu_interrupt_state state)
4580 {
4581 	switch (state) {
4582 	case AMDGPU_IRQ_STATE_DISABLE:
4583 	case AMDGPU_IRQ_STATE_ENABLE:
4584 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4585 			       TIME_STAMP_INT_ENABLE,
4586 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4587 		break;
4588 	default:
4589 		break;
4590 	}
4591 }
4592 
4593 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4594 						     int me, int pipe,
4595 						     enum amdgpu_interrupt_state state)
4596 {
4597 	u32 mec_int_cntl, mec_int_cntl_reg;
4598 
4599 	/*
4600 	 * amdgpu controls only the first MEC. That's why this function only
4601 	 * handles the setting of interrupts for this specific MEC. All other
4602 	 * pipes' interrupts are set by amdkfd.
4603 	 */
4604 
4605 	if (me == 1) {
4606 		switch (pipe) {
4607 		case 0:
4608 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4609 			break;
4610 		case 1:
4611 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4612 			break;
4613 		case 2:
4614 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4615 			break;
4616 		case 3:
4617 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4618 			break;
4619 		default:
4620 			DRM_DEBUG("invalid pipe %d\n", pipe);
4621 			return;
4622 		}
4623 	} else {
4624 		DRM_DEBUG("invalid me %d\n", me);
4625 		return;
4626 	}
4627 
4628 	switch (state) {
4629 	case AMDGPU_IRQ_STATE_DISABLE:
4630 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4631 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4632 					     TIME_STAMP_INT_ENABLE, 0);
4633 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4634 		break;
4635 	case AMDGPU_IRQ_STATE_ENABLE:
4636 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4637 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4638 					     TIME_STAMP_INT_ENABLE, 1);
4639 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4640 		break;
4641 	default:
4642 		break;
4643 	}
4644 }
4645 
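/* Enable/disable the interrupt raised when a command stream accesses a
 * privileged register; processed by gfx_v9_0_priv_reg_irq().
 */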
4646 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4647 					     struct amdgpu_irq_src *source,
4648 					     unsigned type,
4649 					     enum amdgpu_interrupt_state state)
4650 {
4651 	switch (state) {
4652 	case AMDGPU_IRQ_STATE_DISABLE:
4653 	case AMDGPU_IRQ_STATE_ENABLE:
4654 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4655 			       PRIV_REG_INT_ENABLE,
4656 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4657 		break;
4658 	default:
4659 		break;
4660 	}
4661 
4662 	return 0;
4663 }
4664 
4665 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4666 					      struct amdgpu_irq_src *source,
4667 					      unsigned type,
4668 					      enum amdgpu_interrupt_state state)
4669 {
4670 	switch (state) {
4671 	case AMDGPU_IRQ_STATE_DISABLE:
4672 	case AMDGPU_IRQ_STATE_ENABLE:
4673 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4674 			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
4677 		break;
4678 	}
4679 
4680 	return 0;
4681 }
4682 
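/* Helpers that paste the ME/pipe numbers into the register name to flip
 * CP_ECC_ERROR_INT_ENABLE in the matching CP_MEm_PIPEn_INT_CNTL register.
 */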
4683 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)				\
4684 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
4685 			CP_ECC_ERROR_INT_ENABLE, 1)
4686 
4687 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)			\
4688 	WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
4689 			CP_ECC_ERROR_INT_ENABLE, 0)
4690 
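/* Toggle CP ECC error interrupts on the gfx ring0 controller and on all
 * four MEC1 pipes together.
 */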
4691 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
4692 					      struct amdgpu_irq_src *source,
4693 					      unsigned type,
4694 					      enum amdgpu_interrupt_state state)
4695 {
4696 	switch (state) {
4697 	case AMDGPU_IRQ_STATE_DISABLE:
4698 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4699 				CP_ECC_ERROR_INT_ENABLE, 0);
4700 		DISABLE_ECC_ON_ME_PIPE(1, 0);
4701 		DISABLE_ECC_ON_ME_PIPE(1, 1);
4702 		DISABLE_ECC_ON_ME_PIPE(1, 2);
4703 		DISABLE_ECC_ON_ME_PIPE(1, 3);
4704 		break;
4705 
4706 	case AMDGPU_IRQ_STATE_ENABLE:
4707 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4708 				CP_ECC_ERROR_INT_ENABLE, 1);
4709 		ENABLE_ECC_ON_ME_PIPE(1, 0);
4710 		ENABLE_ECC_ON_ME_PIPE(1, 1);
4711 		ENABLE_ECC_ON_ME_PIPE(1, 2);
4712 		ENABLE_ECC_ON_ME_PIPE(1, 3);
4713 		break;
4714 	default:
4715 		break;
4716 	}
4717 
4718 	return 0;
4719 }
4720 
4722 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4723 					    struct amdgpu_irq_src *src,
4724 					    unsigned type,
4725 					    enum amdgpu_interrupt_state state)
4726 {
4727 	switch (type) {
4728 	case AMDGPU_CP_IRQ_GFX_EOP:
4729 		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
4730 		break;
4731 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4732 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4733 		break;
4734 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4735 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4736 		break;
4737 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4738 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4739 		break;
4740 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4741 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4742 		break;
4743 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4744 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4745 		break;
4746 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4747 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4748 		break;
4749 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4750 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4751 		break;
4752 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4753 		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4754 		break;
4755 	default:
4756 		break;
4757 	}
4758 	return 0;
4759 }
4760 
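/* EOP interrupt handler.  The IH ring_id encodes the source as
 * bits [1:0] = pipe, [3:2] = ME and [6:4] = queue; decode it and run
 * fence processing on the matching ring.
 */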
4761 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
4762 			    struct amdgpu_irq_src *source,
4763 			    struct amdgpu_iv_entry *entry)
4764 {
4765 	int i;
4766 	u8 me_id, pipe_id, queue_id;
4767 	struct amdgpu_ring *ring;
4768 
4769 	DRM_DEBUG("IH: CP EOP\n");
4770 	me_id = (entry->ring_id & 0x0c) >> 2;
4771 	pipe_id = (entry->ring_id & 0x03) >> 0;
4772 	queue_id = (entry->ring_id & 0x70) >> 4;
4773 
4774 	switch (me_id) {
4775 	case 0:
4776 		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4777 		break;
4778 	case 1:
4779 	case 2:
4780 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4781 			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
4785 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4786 				amdgpu_fence_process(ring);
4787 		}
4788 		break;
4789 	}
4790 	return 0;
4791 }
4792 
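/* Common fault path for privileged register/instruction interrupts:
 * find the ring the faulting entry came from and report a scheduler
 * fault so the offending job can be recovered.
 */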
4793 static void gfx_v9_0_fault(struct amdgpu_device *adev,
4794 			   struct amdgpu_iv_entry *entry)
4795 {
4796 	u8 me_id, pipe_id, queue_id;
4797 	struct amdgpu_ring *ring;
4798 	int i;
4799 
4800 	me_id = (entry->ring_id & 0x0c) >> 2;
4801 	pipe_id = (entry->ring_id & 0x03) >> 0;
4802 	queue_id = (entry->ring_id & 0x70) >> 4;
4803 
4804 	switch (me_id) {
4805 	case 0:
4806 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4807 		break;
4808 	case 1:
4809 	case 2:
4810 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4811 			ring = &adev->gfx.compute_ring[i];
4812 			if (ring->me == me_id && ring->pipe == pipe_id &&
4813 			    ring->queue == queue_id)
4814 				drm_sched_fault(&ring->sched);
4815 		}
4816 		break;
4817 	}
4818 }
4819 
4820 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
4821 				 struct amdgpu_irq_src *source,
4822 				 struct amdgpu_iv_entry *entry)
4823 {
4824 	DRM_ERROR("Illegal register access in command stream\n");
4825 	gfx_v9_0_fault(adev, entry);
4826 	return 0;
4827 }
4828 
4829 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4830 				  struct amdgpu_irq_src *source,
4831 				  struct amdgpu_iv_entry *entry)
4832 {
4833 	DRM_ERROR("Illegal instruction in command stream\n");
4834 	gfx_v9_0_fault(adev, entry);
4835 	return 0;
4836 }
4837 
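/* RAS callback for consumed uncorrectable errors: tell KFD about the
 * SRAM ECC condition and schedule a GPU reset.
 */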
4838 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
4839 		struct amdgpu_iv_entry *entry)
4840 {
	/* TODO: an uncorrectable error (UE) will trigger an interrupt. */
4842 	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
4843 	amdgpu_ras_reset_gpu(adev, 0);
4844 	return AMDGPU_RAS_UE;
4845 }
4846 
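/* Forward CP ECC error interrupts to the RAS core, which dispatches
 * them to the handler registered for this block's ras_if.
 */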
4847 static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
4848 				  struct amdgpu_irq_src *source,
4849 				  struct amdgpu_iv_entry *entry)
4850 {
4851 	struct ras_common_if *ras_if = adev->gfx.ras_if;
4852 	struct ras_dispatch_if ih_data = {
4853 		.entry = entry,
4854 	};
4855 
4856 	if (!ras_if)
4857 		return 0;
4858 
4859 	ih_data.head = *ras_if;
4860 
4861 	DRM_ERROR("CP ECC ERROR IRQ\n");
4862 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
4863 	return 0;
4864 }
4865 
4866 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4867 	.name = "gfx_v9_0",
4868 	.early_init = gfx_v9_0_early_init,
4869 	.late_init = gfx_v9_0_late_init,
4870 	.sw_init = gfx_v9_0_sw_init,
4871 	.sw_fini = gfx_v9_0_sw_fini,
4872 	.hw_init = gfx_v9_0_hw_init,
4873 	.hw_fini = gfx_v9_0_hw_fini,
4874 	.suspend = gfx_v9_0_suspend,
4875 	.resume = gfx_v9_0_resume,
4876 	.is_idle = gfx_v9_0_is_idle,
4877 	.wait_for_idle = gfx_v9_0_wait_for_idle,
4878 	.soft_reset = gfx_v9_0_soft_reset,
4879 	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
4880 	.set_powergating_state = gfx_v9_0_set_powergating_state,
4881 	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
4882 };
4883 
4884 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4885 	.type = AMDGPU_RING_TYPE_GFX,
4886 	.align_mask = 0xff,
4887 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4888 	.support_64bit_ptrs = true,
4889 	.vmhub = AMDGPU_GFXHUB,
4890 	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
4891 	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
4892 	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
4894 		5 +  /* COND_EXEC */
4895 		7 +  /* PIPELINE_SYNC */
4896 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4897 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4898 		2 + /* VM_FLUSH */
4899 		8 +  /* FENCE for VM_FLUSH */
4900 		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		       the first COND_EXEC jumps to the place just
		       prior to this double SWITCH_BUFFER */
4904 		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
4907 		14 + /*	CE_META */
4908 		31 + /*	DE_META */
4909 		3 + /* CNTX_CTRL */
4910 		5 + /* HDP_INVL */
4911 		8 + 8 + /* FENCE x2 */
4912 		2, /* SWITCH_BUFFER */
4913 	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
4914 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
4915 	.emit_fence = gfx_v9_0_ring_emit_fence,
4916 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4917 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4918 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4919 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4920 	.test_ring = gfx_v9_0_ring_test_ring,
4921 	.test_ib = gfx_v9_0_ring_test_ib,
4922 	.insert_nop = amdgpu_ring_insert_nop,
4923 	.pad_ib = amdgpu_ring_generic_pad_ib,
4924 	.emit_switch_buffer = gfx_v9_ring_emit_sb,
4925 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
4926 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4927 	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4928 	.emit_tmz = gfx_v9_0_ring_emit_tmz,
4929 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
4930 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4931 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4932 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
4933 };
4934 
4935 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4936 	.type = AMDGPU_RING_TYPE_COMPUTE,
4937 	.align_mask = 0xff,
4938 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4939 	.support_64bit_ptrs = true,
4940 	.vmhub = AMDGPU_GFXHUB,
4941 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
4942 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
4943 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
4944 	.emit_frame_size =
4945 		20 + /* gfx_v9_0_ring_emit_gds_switch */
4946 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
4947 		5 + /* hdp invalidate */
4948 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4949 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4950 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4951 		2 + /* gfx_v9_0_ring_emit_vm_flush */
4952 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4953 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
4954 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
4955 	.emit_fence = gfx_v9_0_ring_emit_fence,
4956 	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4957 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4958 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4959 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4960 	.test_ring = gfx_v9_0_ring_test_ring,
4961 	.test_ib = gfx_v9_0_ring_test_ib,
4962 	.insert_nop = amdgpu_ring_insert_nop,
4963 	.pad_ib = amdgpu_ring_generic_pad_ib,
4964 	.set_priority = gfx_v9_0_ring_set_priority_compute,
4965 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
4966 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4967 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4968 };
4969 
4970 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4971 	.type = AMDGPU_RING_TYPE_KIQ,
4972 	.align_mask = 0xff,
4973 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4974 	.support_64bit_ptrs = true,
4975 	.vmhub = AMDGPU_GFXHUB,
4976 	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
4977 	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
4978 	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
4979 	.emit_frame_size =
4980 		20 + /* gfx_v9_0_ring_emit_gds_switch */
4981 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
4982 		5 + /* hdp invalidate */
4983 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4984 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4985 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4986 		2 + /* gfx_v9_0_ring_emit_vm_flush */
4987 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4988 	.emit_ib_size =	7, /* gfx_v9_0_ring_emit_ib_compute */
4989 	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
4990 	.test_ring = gfx_v9_0_ring_test_ring,
4991 	.insert_nop = amdgpu_ring_insert_nop,
4992 	.pad_ib = amdgpu_ring_generic_pad_ib,
4993 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
4994 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
4995 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4996 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4997 };
4998 
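/* Attach the per-ring-type function tables defined above. */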
4999 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
5000 {
5001 	int i;
5002 
5003 	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
5004 
5005 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5006 		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
5007 
5008 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5009 		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
5010 }
5011 
5012 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
5013 	.set = gfx_v9_0_set_eop_interrupt_state,
5014 	.process = gfx_v9_0_eop_irq,
5015 };
5016 
5017 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
5018 	.set = gfx_v9_0_set_priv_reg_fault_state,
5019 	.process = gfx_v9_0_priv_reg_irq,
5020 };
5021 
5022 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
5023 	.set = gfx_v9_0_set_priv_inst_fault_state,
5024 	.process = gfx_v9_0_priv_inst_irq,
5025 };
5026 
5027 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
5028 	.set = gfx_v9_0_set_cp_ecc_error_state,
5029 	.process = gfx_v9_0_cp_ecc_error_irq,
5030 };
5031 
5033 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
5034 {
5035 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5036 	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
5037 
5038 	adev->gfx.priv_reg_irq.num_types = 1;
5039 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
5040 
5041 	adev->gfx.priv_inst_irq.num_types = 1;
5042 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
5043 
	adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
5045 	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
5046 }
5047 
5048 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
5049 {
5050 	switch (adev->asic_type) {
5051 	case CHIP_VEGA10:
5052 	case CHIP_VEGA12:
5053 	case CHIP_VEGA20:
5054 	case CHIP_RAVEN:
5055 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
5056 		break;
5057 	default:
5058 		break;
5059 	}
5060 }
5061 
5062 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
5063 {
	/* init ASIC GDS info */
5065 	switch (adev->asic_type) {
5066 	case CHIP_VEGA10:
5067 	case CHIP_VEGA12:
5068 	case CHIP_VEGA20:
5069 		adev->gds.mem.total_size = 0x10000;
5070 		break;
5071 	case CHIP_RAVEN:
5072 		adev->gds.mem.total_size = 0x1000;
5073 		break;
5074 	default:
5075 		adev->gds.mem.total_size = 0x10000;
5076 		break;
5077 	}
5078 
5079 	switch (adev->asic_type) {
5080 	case CHIP_VEGA10:
5081 	case CHIP_VEGA20:
5082 		adev->gds.gds_compute_max_wave_id = 0x7ff;
5083 		break;
5084 	case CHIP_VEGA12:
5085 		adev->gds.gds_compute_max_wave_id = 0x27f;
5086 		break;
5087 	case CHIP_RAVEN:
5088 		if (adev->rev_id >= 0x8)
5089 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
5090 		else
5091 			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
5092 		break;
5093 	default:
5094 		/* this really depends on the chip */
5095 		adev->gds.gds_compute_max_wave_id = 0x7ff;
5096 		break;
5097 	}
5098 
5099 	adev->gds.gws.total_size = 64;
5100 	adev->gds.oa.total_size = 16;
5101 
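	/* Carve the GDS, GWS and OA apertures into fixed gfx and per-CS
	 * partitions; the split depends on whether 64KB of GDS is present.
	 */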
5102 	if (adev->gds.mem.total_size == 64 * 1024) {
5103 		adev->gds.mem.gfx_partition_size = 4096;
5104 		adev->gds.mem.cs_partition_size = 4096;
5105 
5106 		adev->gds.gws.gfx_partition_size = 4;
5107 		adev->gds.gws.cs_partition_size = 4;
5108 
5109 		adev->gds.oa.gfx_partition_size = 4;
5110 		adev->gds.oa.cs_partition_size = 1;
5111 	} else {
5112 		adev->gds.mem.gfx_partition_size = 1024;
5113 		adev->gds.mem.cs_partition_size = 1024;
5114 
5115 		adev->gds.gws.gfx_partition_size = 16;
5116 		adev->gds.gws.cs_partition_size = 16;
5117 
5118 		adev->gds.oa.gfx_partition_size = 4;
5119 		adev->gds.oa.cs_partition_size = 4;
5120 	}
5121 }
5122 
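/* Program the user-requested inactive-CU mask for the SE/SH currently
 * selected via GRBM; a zero bitmap leaves the register untouched.
 */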
5123 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
5124 						 u32 bitmap)
5125 {
5126 	u32 data;
5127 
5128 	if (!bitmap)
5129 		return;
5130 
5131 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
5132 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
5133 
5134 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5135 }
5136 
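/* Return the bitmap of active CUs in the currently selected SE/SH:
 * OR the fused-off (CC) and user-disabled (GC_USER) inactive masks,
 * then invert under the max_cu_per_sh bitmask.
 */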
5137 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
5138 {
5139 	u32 data, mask;
5140 
5141 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5142 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5143 
5144 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
5145 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
5146 
5147 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
5148 
5149 	return (~data) & mask;
5150 }
5151 
5152 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
5153 				 struct amdgpu_cu_info *cu_info)
5154 {
5155 	int i, j, k, counter, active_cu_number = 0;
5156 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5157 	unsigned disable_masks[4 * 2];
5158 
5159 	if (!adev || !cu_info)
5160 		return -EINVAL;
5161 
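	/* Parse the CU-disable module option into per-SE/SH masks
	 * (up to 4 SEs x 2 SHs).
	 */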
5162 	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5163 
5164 	mutex_lock(&adev->grbm_idx_mutex);
5165 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5166 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5167 			mask = 1;
5168 			ao_bitmap = 0;
5169 			counter = 0;
5170 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
5171 			if (i < 4 && j < 2)
5172 				gfx_v9_0_set_user_cu_inactive_bitmap(
5173 					adev, disable_masks[i * 2 + j]);
5174 			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
5175 			cu_info->bitmap[i][j] = bitmap;
5176 
			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5178 				if (bitmap & mask) {
5179 					if (counter < adev->gfx.config.max_cu_per_sh)
5180 						ao_bitmap |= mask;
					counter++;
5182 				}
5183 				mask <<= 1;
5184 			}
5185 			active_cu_number += counter;
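			/* ao_cu_mask packs 8 always-on bits per SH, two SHs
			 * per SE, for the first two SEs only.
			 */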
5186 			if (i < 2 && j < 2)
5187 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5188 			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5189 		}
5190 	}
5191 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5192 	mutex_unlock(&adev->grbm_idx_mutex);
5193 
5194 	cu_info->number = active_cu_number;
5195 	cu_info->ao_cu_mask = ao_cu_mask;
5196 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5197 
5198 	return 0;
5199 }
5200 
5201 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
5202 {
5203 	.type = AMD_IP_BLOCK_TYPE_GFX,
5204 	.major = 9,
5205 	.minor = 0,
5206 	.rev = 0,
5207 	.funcs = &gfx_v9_0_ip_funcs,
5208 };
5209