/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

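/*
 * Program the "golden" register settings for the detected ASIC: the
 * generation-wide list first, then the chip-specific variant, and
 * finally the gc_9_x_common sequence shared by all gfx9 parts.
 */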
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->rev_id >= 8)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

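/*
 * Emit a WRITE_DATA packet on @ring that writes @val to the register at
 * offset @reg, optionally requesting write confirmation (@wc).
 */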
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
				 /* memory (1) or register (0) */
				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
				 WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

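/*
 * Basic ring sanity test: write a magic value to a scratch register via
 * the ring and poll until it reads back (or the timeout expires).
 */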
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

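/*
 * Indirect buffer test: submit a small IB that writes a magic value to a
 * writeback slot, then wait on the resulting fence.
 */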
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
		r = -EINVAL;
	}

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

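/*
 * Record whether the loaded ME/PFP and MEC firmware appear new enough
 * (per-ASIC minimum ucode/feature versions) to support the firmware
 * write-wait path.
 */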
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		break;
	}
}

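/*
 * Fetch and validate all gfx9 microcode images (PFP, ME, CE, RLC, MEC
 * and optional MEC2) for the current chip, then record them in the
 * firmware list used for PSP front-door loading.
 */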
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			chip_name = "raven2";
		else if (adev->pdev->device == 0x15d8)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	/* check the validation result before dereferencing the header */
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		/* MEC2 firmware is optional; fall back to MEC1 only */
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	gfx_v9_0_check_fw_write_wait(adev);
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

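/*
 * Build the per-SE/SH always-on CU bitmaps.  The first
 * pg_always_on_cu_num CUs are also programmed as the PG always-on set.
 */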
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (cu_info->bitmap[i][j] & mask) {
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/*
	 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
	 * programmed in gfx_v9_0_init_always_on_cu_mask()
	 */

	/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

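/*
 * Copy the CP jump tables (CE, PFP, ME, MEC1, MEC2) out of the firmware
 * images into the RLC cp_table buffer, packed back to back.
 */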
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

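/*
 * Allocate and fill the RLC clear-state buffer, set up the CP jump
 * table on Raven, and initialize load balancing (LBPW) where supported.
 */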
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		gfx_v9_0_init_lbpw(adev);
		break;
	case CHIP_VEGA20:
		gfx_v9_4_init_lbpw(adev);
		break;
	default:
		break;
	}

	return 0;
}

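/* Pin the clear-state buffer in VRAM so the CP can fetch from it. */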
static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

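/*
 * Allocate the HPD EOP buffer for the acquired compute queues and stage
 * the MEC firmware image in GTT.
 */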
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

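/* Read one dword from the per-wave indexed register space via SQ_IND_*. */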
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				  u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}

1378 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1379 	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1380 	.select_se_sh = &gfx_v9_0_select_se_sh,
1381 	.read_wave_data = &gfx_v9_0_read_wave_data,
1382 	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1383 	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1384 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
1385 };
1386 
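/*
 * Apply the per-ASIC golden gfx configuration. The GB_ADDR_CONFIG
 * topology fields are log2-encoded, so each one is expanded below with
 * a 1 << REG_GET_FIELD() decode; PIPE_INTERLEAVE_SIZE is additionally
 * biased by 8 because a field value of 0 encodes 256 bytes.
 */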
1387 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1388 {
1389 	u32 gb_addr_config;
1390 	int err;
1391 
1392 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1393 
1394 	switch (adev->asic_type) {
1395 	case CHIP_VEGA10:
1396 		adev->gfx.config.max_hw_contexts = 8;
1397 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1398 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1399 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1400 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1401 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1402 		break;
1403 	case CHIP_VEGA12:
1404 		adev->gfx.config.max_hw_contexts = 8;
1405 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1406 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1407 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1408 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1409 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1410 		DRM_INFO("fix gfx.config for vega12\n");
1411 		break;
1412 	case CHIP_VEGA20:
1413 		adev->gfx.config.max_hw_contexts = 8;
1414 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1415 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1416 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1417 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1418 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1419 		gb_addr_config &= ~0xf3e777ff;
1420 		gb_addr_config |= 0x22014042;
1421 		/* check vbios table if gpu info is not available */
1422 		err = amdgpu_atomfirmware_get_gfx_info(adev);
1423 		if (err)
1424 			return err;
1425 		break;
1426 	case CHIP_RAVEN:
1427 		adev->gfx.config.max_hw_contexts = 8;
1428 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1429 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1430 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1431 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1432 		if (adev->rev_id >= 8)
1433 			gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
1434 		else
1435 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1436 		break;
1437 	default:
1438 		BUG();
1439 		break;
1440 	}
1441 
1442 	adev->gfx.config.gb_addr_config = gb_addr_config;
1443 
1444 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1445 			REG_GET_FIELD(
1446 					adev->gfx.config.gb_addr_config,
1447 					GB_ADDR_CONFIG,
1448 					NUM_PIPES);
1449 
1450 	adev->gfx.config.max_tile_pipes =
1451 		adev->gfx.config.gb_addr_config_fields.num_pipes;
1452 
1453 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1454 			REG_GET_FIELD(
1455 					adev->gfx.config.gb_addr_config,
1456 					GB_ADDR_CONFIG,
1457 					NUM_BANKS);
1458 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1459 			REG_GET_FIELD(
1460 					adev->gfx.config.gb_addr_config,
1461 					GB_ADDR_CONFIG,
1462 					MAX_COMPRESSED_FRAGS);
1463 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1464 			REG_GET_FIELD(
1465 					adev->gfx.config.gb_addr_config,
1466 					GB_ADDR_CONFIG,
1467 					NUM_RB_PER_SE);
1468 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1469 			REG_GET_FIELD(
1470 					adev->gfx.config.gb_addr_config,
1471 					GB_ADDR_CONFIG,
1472 					NUM_SHADER_ENGINES);
1473 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1474 			REG_GET_FIELD(
1475 					adev->gfx.config.gb_addr_config,
1476 					GB_ADDR_CONFIG,
1477 					PIPE_INTERLEAVE_SIZE));
1478 
1479 	return 0;
1480 }
1481 
1482 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1483 				   struct amdgpu_ngg_buf *ngg_buf,
1484 				   int size_se,
1485 				   int default_size_se)
1486 {
1487 	int r;
1488 
1489 	if (size_se < 0) {
1490 		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1491 		return -EINVAL;
1492 	}
1493 	size_se = size_se ? size_se : default_size_se;
1494 
1495 	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1496 	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1497 				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1498 				    &ngg_buf->bo,
1499 				    &ngg_buf->gpu_addr,
1500 				    NULL);
1501 	if (r) {
1502 		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1503 		return r;
1504 	}
1505 	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1506 
1507 	return r;
1508 }
1509 
1510 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1511 {
1512 	int i;
1513 
1514 	for (i = 0; i < NGG_BUF_MAX; i++)
1515 		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1516 				      &adev->gfx.ngg.buf[i].gpu_addr,
1517 				      NULL);
1518 
1519 	memset(&adev->gfx.ngg.buf[0], 0,
1520 			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
1521 
1522 	adev->gfx.ngg.init = false;
1523 
1524 	return 0;
1525 }
1526 
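/*
 * Allocate the NGG (next-generation geometry) working buffers: one
 * primitive, position and control-sideband buffer plus an optional
 * parameter cache, each sized per shader engine from the matching
 * amdgpu_*_per_se module parameter; gfx_v9_0_ngg_create_buf() falls
 * back to the default size given here when the parameter is zero.
 */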
1527 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
1528 {
1529 	int r;
1530 
1531 	if (!amdgpu_ngg || adev->gfx.ngg.init)
1532 		return 0;
1533 
1534 	/* GDS reserve memory: 64-byte alignment */
1535 	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
1536 	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
1537 	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
1538 	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
1539 	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
1540 
1541 	/* Primitive Buffer */
1542 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
1543 				    amdgpu_prim_buf_per_se,
1544 				    64 * 1024);
1545 	if (r) {
1546 		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
1547 		goto err;
1548 	}
1549 
1550 	/* Position Buffer */
1551 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
1552 				    amdgpu_pos_buf_per_se,
1553 				    256 * 1024);
1554 	if (r) {
1555 		dev_err(adev->dev, "Failed to create Position Buffer\n");
1556 		goto err;
1557 	}
1558 
1559 	/* Control Sideband */
1560 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
1561 				    amdgpu_cntl_sb_buf_per_se,
1562 				    256);
1563 	if (r) {
1564 		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
1565 		goto err;
1566 	}
1567 
1568 	/* Parameter Cache, not created by default */
1569 	if (amdgpu_param_buf_per_se <= 0)
1570 		goto out;
1571 
1572 	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
1573 				    amdgpu_param_buf_per_se,
1574 				    512 * 1024);
1575 	if (r) {
1576 		dev_err(adev->dev, "Failed to create Parameter Cache\n");
1577 		goto err;
1578 	}
1579 
1580 out:
1581 	adev->gfx.ngg.init = true;
1582 	return 0;
1583 err:
1584 	gfx_v9_0_ngg_fini(adev);
1585 	return r;
1586 }
1587 
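/*
 * Enable NGG: program the WD buffer sizes (256-byte units, hence the
 * >> 8 shifts; the parameter cache uses 1KB units) and base addresses,
 * then clear the GDS reserve area with a DMA_DATA packet whose
 * destination is GDS (DST_SEL(1)) and whose source is the packet's own
 * zeroed inline data (SRC_SEL(2)).
 */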
1588 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1589 {
1590 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1591 	int r;
1592 	u32 data, base;
1593 
1594 	if (!amdgpu_ngg)
1595 		return 0;
1596 
1597 	/* Program buffer size */
1598 	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1599 			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1600 	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1601 			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
1602 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1603 
1604 	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1605 			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1606 	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1607 			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1608 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1609 
1610 	/* Program buffer base address */
1611 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1612 	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1613 	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1614 
1615 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1616 	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1617 	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1618 
1619 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1620 	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1621 	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1622 
1623 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1624 	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1625 	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1626 
1627 	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1628 	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1629 	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1630 
1631 	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1632 	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1633 	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1634 
1635 	/* Clear GDS reserved memory */
1636 	r = amdgpu_ring_alloc(ring, 17);
1637 	if (r) {
1638 		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1639 			  ring->idx, r);
1640 		return r;
1641 	}
1642 
1643 	gfx_v9_0_write_data_to_reg(ring, 0, false,
1644 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1645 				   (adev->gds.mem.total_size +
1646 				    adev->gfx.ngg.gds_reserve_size));
1647 
1648 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1649 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1650 				PACKET3_DMA_DATA_DST_SEL(1) |
1651 				PACKET3_DMA_DATA_SRC_SEL(2)));
1652 	amdgpu_ring_write(ring, 0);
1653 	amdgpu_ring_write(ring, 0);
1654 	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1655 	amdgpu_ring_write(ring, 0);
1656 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1657 				adev->gfx.ngg.gds_reserve_size);
1658 
1659 	gfx_v9_0_write_data_to_reg(ring, 0, false,
1660 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1661 
1662 	amdgpu_ring_commit(ring);
1663 
1664 	return 0;
1665 }
1666 
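/*
 * Set up one compute (MEC) ring. Rings are named
 * comp_<me>.<pipe>.<queue> with mec0 exposed as me1, since me0 is the
 * graphics ME; the doorbell slot index is shifted left by one to match
 * the two-dword stride of the 64-bit doorbells used on SOC15 parts.
 */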
1667 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1668 				      int mec, int pipe, int queue)
1669 {
1670 	int r;
1671 	unsigned irq_type;
1672 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1675 
1676 	/* mec0 is me1 */
1677 	ring->me = mec + 1;
1678 	ring->pipe = pipe;
1679 	ring->queue = queue;
1680 
1681 	ring->ring_obj = NULL;
1682 	ring->use_doorbell = true;
1683 	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
1684 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1685 				+ (ring_id * GFX9_MEC_HPD_SIZE);
1686 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1687 
1688 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1689 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1690 		+ ring->pipe;
1691 
1692 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1693 	r = amdgpu_ring_init(adev, ring, 1024,
1694 			     &adev->gfx.eop_irq, irq_type);
1695 	if (r)
1696 		return r;
1697 
1699 	return 0;
1700 }
1701 
1702 static int gfx_v9_0_sw_init(void *handle)
1703 {
1704 	int i, j, k, r, ring_id;
1705 	struct amdgpu_ring *ring;
1706 	struct amdgpu_kiq *kiq;
1707 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1708 
1709 	switch (adev->asic_type) {
1710 	case CHIP_VEGA10:
1711 	case CHIP_VEGA12:
1712 	case CHIP_VEGA20:
1713 	case CHIP_RAVEN:
1714 		adev->gfx.mec.num_mec = 2;
1715 		break;
1716 	default:
1717 		adev->gfx.mec.num_mec = 1;
1718 		break;
1719 	}
1720 
1721 	adev->gfx.mec.num_pipe_per_mec = 4;
1722 	adev->gfx.mec.num_queue_per_pipe = 8;
1723 
1724 	/* EOP Event */
1725 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1726 	if (r)
1727 		return r;
1728 
1729 	/* Privileged reg */
1730 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1731 			      &adev->gfx.priv_reg_irq);
1732 	if (r)
1733 		return r;
1734 
1735 	/* Privileged inst */
1736 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1737 			      &adev->gfx.priv_inst_irq);
1738 	if (r)
1739 		return r;
1740 
1741 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1742 
1743 	gfx_v9_0_scratch_init(adev);
1744 
1745 	r = gfx_v9_0_init_microcode(adev);
1746 	if (r) {
1747 		DRM_ERROR("Failed to load gfx firmware!\n");
1748 		return r;
1749 	}
1750 
1751 	r = gfx_v9_0_rlc_init(adev);
1752 	if (r) {
1753 		DRM_ERROR("Failed to init rlc BOs!\n");
1754 		return r;
1755 	}
1756 
1757 	r = gfx_v9_0_mec_init(adev);
1758 	if (r) {
1759 		DRM_ERROR("Failed to init MEC BOs!\n");
1760 		return r;
1761 	}
1762 
1763 	/* set up the gfx ring */
1764 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1765 		ring = &adev->gfx.gfx_ring[i];
1766 		ring->ring_obj = NULL;
1767 		if (!i)
1768 			sprintf(ring->name, "gfx");
1769 		else
1770 			sprintf(ring->name, "gfx_%d", i);
1771 		ring->use_doorbell = true;
1772 		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
1773 		r = amdgpu_ring_init(adev, ring, 1024,
1774 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1775 		if (r)
1776 			return r;
1777 	}
1778 
1779 	/* set up the compute queues - allocate horizontally across pipes */
1780 	ring_id = 0;
1781 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1782 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1783 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1784 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1785 					continue;
1786 
1787 				r = gfx_v9_0_compute_ring_init(adev,
1788 							       ring_id,
1789 							       i, k, j);
1790 				if (r)
1791 					return r;
1792 
1793 				ring_id++;
1794 			}
1795 		}
1796 	}
1797 
1798 	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1799 	if (r) {
1800 		DRM_ERROR("Failed to init KIQ BOs!\n");
1801 		return r;
1802 	}
1803 
1804 	kiq = &adev->gfx.kiq;
1805 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1806 	if (r)
1807 		return r;
1808 
1809 	/* create MQD for all compute queues as well as KIQ for the SRIOV case */
1810 	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1811 	if (r)
1812 		return r;
1813 
1814 	adev->gfx.ce_ram_size = 0x8000;
1815 
1816 	r = gfx_v9_0_gpu_early_init(adev);
1817 	if (r)
1818 		return r;
1819 
1820 	r = gfx_v9_0_ngg_init(adev);
1821 	if (r)
1822 		return r;
1823 
1824 	return 0;
1825 }
1826 
1828 static int gfx_v9_0_sw_fini(void *handle)
1829 {
1830 	int i;
1831 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1832 
1833 	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1834 	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1835 	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1836 
1837 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1838 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1839 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1840 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1841 
1842 	amdgpu_gfx_compute_mqd_sw_fini(adev);
1843 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1844 	amdgpu_gfx_kiq_fini(adev);
1845 
1846 	gfx_v9_0_mec_fini(adev);
1847 	gfx_v9_0_ngg_fini(adev);
1848 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1849 				&adev->gfx.rlc.clear_state_gpu_addr,
1850 				(void **)&adev->gfx.rlc.cs_ptr);
1851 	if (adev->asic_type == CHIP_RAVEN) {
1852 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1853 				&adev->gfx.rlc.cp_table_gpu_addr,
1854 				(void **)&adev->gfx.rlc.cp_table_ptr);
1855 	}
1856 	gfx_v9_0_free_microcode(adev);
1857 
1858 	return 0;
1859 }
1860 
1862 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1863 {
1864 	/* TODO */
1865 }
1866 
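/*
 * Steer subsequent register accesses to a single SE/SH/instance
 * through GRBM_GFX_INDEX. Passing 0xffffffff for an argument selects
 * broadcast mode at that level instead of one index; callers hold
 * grbm_idx_mutex around the whole select/access/deselect sequence.
 */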
1867 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1868 {
1869 	u32 data;
1870 
1871 	if (instance == 0xffffffff)
1872 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1873 	else
1874 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1875 
1876 	if (se_num == 0xffffffff)
1877 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1878 	else
1879 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1880 
1881 	if (sh_num == 0xffffffff)
1882 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1883 	else
1884 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1885 
1886 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1887 }
1888 
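/*
 * A render backend is unusable if it is disabled in either the fuse
 * (CC) or the user (GC_USER) backend-disable register, so the two are
 * OR'd together and the inverted result is masked down to the RBs
 * owned by the currently selected SH.
 */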
1889 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1890 {
1891 	u32 data, mask;
1892 
1893 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1894 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1895 
1896 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1897 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1898 
1899 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1900 					 adev->gfx.config.max_sh_per_se);
1901 
1902 	return (~data) & mask;
1903 }
1904 
1905 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1906 {
1907 	int i, j;
1908 	u32 data;
1909 	u32 active_rbs = 0;
1910 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1911 					adev->gfx.config.max_sh_per_se;
1912 
1913 	mutex_lock(&adev->grbm_idx_mutex);
1914 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1915 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1916 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1917 			data = gfx_v9_0_get_rb_active_bitmap(adev);
1918 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1919 					       rb_bitmap_width_per_sh);
1920 		}
1921 	}
1922 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1923 	mutex_unlock(&adev->grbm_idx_mutex);
1924 
1925 	adev->gfx.config.backend_enable_mask = active_rbs;
1926 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1927 }
1928 
1929 #define DEFAULT_SH_MEM_BASES	(0x6000)
1930 #define FIRST_COMPUTE_VMID	(8)
1931 #define LAST_COMPUTE_VMID	(16)
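/*
 * VMIDs 8-15 are reserved for compute queues and all share the fixed
 * SH_MEM_BASES value programmed below (0x6000 in the top sixteen
 * address bits), which yields the aperture layout described in the
 * comment inside the function.
 */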
1932 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1933 {
1934 	int i;
1935 	uint32_t sh_mem_config;
1936 	uint32_t sh_mem_bases;
1937 
1938 	/*
1939 	 * Configure apertures:
1940 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1941 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1942 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1943 	 */
1944 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1945 
1946 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1947 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1948 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1949 
1950 	mutex_lock(&adev->srbm_mutex);
1951 	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1952 		soc15_grbm_select(adev, 0, 0, 0, i);
1953 		/* CP and shaders */
1954 		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1955 		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1956 	}
1957 	soc15_grbm_select(adev, 0, 0, 0, 0);
1958 	mutex_unlock(&adev->srbm_mutex);
1959 }
1960 
1961 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1962 {
1963 	u32 tmp;
1964 	int i;
1965 
1966 	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1967 
1968 	gfx_v9_0_tiling_mode_table_init(adev);
1969 
1970 	gfx_v9_0_setup_rb(adev);
1971 	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1972 	adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1973 
1974 	/* XXX SH_MEM regs */
1975 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1976 	mutex_lock(&adev->srbm_mutex);
1977 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1978 		soc15_grbm_select(adev, 0, 0, 0, i);
1979 		/* CP and shaders */
1980 		if (i == 0) {
1981 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1982 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1983 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1984 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1985 		} else {
1986 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1987 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1988 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1989 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1990 				(adev->gmc.private_aperture_start >> 48));
1991 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1992 				(adev->gmc.shared_aperture_start >> 48));
1993 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1994 		}
1995 	}
1996 	soc15_grbm_select(adev, 0, 0, 0, 0);
1997 
1998 	mutex_unlock(&adev->srbm_mutex);
1999 
2000 	gfx_v9_0_init_compute_vmid(adev);
2001 
2002 	mutex_lock(&adev->grbm_idx_mutex);
2003 	/*
2004 	 * making sure that the following register writes will be broadcasted
2005 	 * to all the shaders
2006 	 */
2007 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2008 
2009 	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
2010 		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
2011 			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
2012 		   (adev->gfx.config.sc_prim_fifo_size_backend <<
2013 			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
2014 		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
2015 			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
2016 		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
2017 			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
2018 	mutex_unlock(&adev->grbm_idx_mutex);
2019 
2020 }
2021 
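/*
 * Wait for the RLC serdes to go idle: first poll the per-CU master
 * busy status under every SE/SH selection, then the non-CU (SE, GC and
 * TC) masters, with each poll bounded by adev->usec_timeout.
 */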
2022 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2023 {
2024 	u32 i, j, k;
2025 	u32 mask;
2026 
2027 	mutex_lock(&adev->grbm_idx_mutex);
2028 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2029 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2030 			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2031 			for (k = 0; k < adev->usec_timeout; k++) {
2032 				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2033 					break;
2034 				udelay(1);
2035 			}
2036 			if (k == adev->usec_timeout) {
2037 				gfx_v9_0_select_se_sh(adev, 0xffffffff,
2038 						      0xffffffff, 0xffffffff);
2039 				mutex_unlock(&adev->grbm_idx_mutex);
2040 				DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
2041 					 i, j);
2042 				return;
2043 			}
2044 		}
2045 	}
2046 	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2047 	mutex_unlock(&adev->grbm_idx_mutex);
2048 
2049 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2050 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2051 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2052 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2053 	for (k = 0; k < adev->usec_timeout; k++) {
2054 		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2055 			break;
2056 		udelay(1);
2057 	}
2058 }
2059 
2060 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2061 					       bool enable)
2062 {
2063 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2064 
2065 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2066 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2067 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2068 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2069 
2070 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2071 }
2072 
2073 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2074 {
2075 	/* csib */
2076 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2077 			adev->gfx.rlc.clear_state_gpu_addr >> 32);
2078 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2079 			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2080 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2081 			adev->gfx.rlc.clear_state_size);
2082 }
2083 
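/*
 * Walk the indirect portion of the RLC register-list-format blob. Each
 * indirect block is terminated by 0xFFFFFFFF; the parser records the
 * starting offset of every block in indirect_start_offsets[] and
 * collects the distinct indirect register offsets the blocks reference
 * into unique_indirect_regs[].
 */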
2084 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2085 				int indirect_offset,
2086 				int list_size,
2087 				int *unique_indirect_regs,
2088 				int unique_indirect_reg_count,
2089 				int *indirect_start_offsets,
2090 				int *indirect_start_offsets_count,
2091 				int max_start_offsets_count)
2092 {
2093 	int idx;
2094 
2095 	for (; indirect_offset < list_size; indirect_offset++) {
2096 		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2097 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2098 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2099 
2100 		while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2101 			indirect_offset += 2;
2102 
2103 			/* look for the matching index */
2104 			for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2105 				if (unique_indirect_regs[idx] ==
2106 					register_list_format[indirect_offset] ||
2107 					!unique_indirect_regs[idx])
2108 					break;
2109 			}
2110 
2111 			BUG_ON(idx >= unique_indirect_reg_count);
2112 
2113 			if (!unique_indirect_regs[idx])
2114 				unique_indirect_regs[idx] = register_list_format[indirect_offset];
2115 
2116 			indirect_offset++;
2117 		}
2118 	}
2119 }
2120 
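/*
 * Program the save/restore list into the RLC: the direct
 * register_restore table is streamed into SRM ARAM, the format list
 * into GPM scratch RAM (with each indirect register replaced by its
 * index into unique_indirect_regs[]), and every unique indirect
 * register is then bound to one of the SRM INDEX_CNTL ADDR/DATA pairs.
 */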
2121 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2122 {
2123 	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2124 	int unique_indirect_reg_count = 0;
2125 
2126 	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2127 	int indirect_start_offsets_count = 0;
2128 
2129 	int list_size = 0;
2130 	int i = 0, j = 0;
2131 	u32 tmp = 0;
2132 
2133 	u32 *register_list_format =
2134 		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2135 	if (!register_list_format)
2136 		return -ENOMEM;
2137 	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
2138 		adev->gfx.rlc.reg_list_format_size_bytes);
2139 
2140 	/* setup unique_indirect_regs array and indirect_start_offsets array */
2141 	unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2142 	gfx_v9_1_parse_ind_reg_list(register_list_format,
2143 				    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2144 				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2145 				    unique_indirect_regs,
2146 				    unique_indirect_reg_count,
2147 				    indirect_start_offsets,
2148 				    &indirect_start_offsets_count,
2149 				    ARRAY_SIZE(indirect_start_offsets));
2150 
2151 	/* enable auto inc in case it is disabled */
2152 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2153 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2154 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2155 
2156 	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2157 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2158 		RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2159 	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2160 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2161 			adev->gfx.rlc.register_restore[i]);
2162 
2163 	/* load indirect register */
2164 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2165 		adev->gfx.rlc.reg_list_format_start);
2166 
2167 	/* direct register portion */
2168 	for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2169 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2170 			register_list_format[i]);
2171 
2172 	/* indirect register portion */
2173 	while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2174 		if (register_list_format[i] == 0xFFFFFFFF) {
2175 			WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2176 			continue;
2177 		}
2178 
2179 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2180 		WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2181 
2182 		for (j = 0; j < unique_indirect_reg_count; j++) {
2183 			if (register_list_format[i] == unique_indirect_regs[j]) {
2184 				WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2185 				break;
2186 			}
2187 		}
2188 
2189 		BUG_ON(j >= unique_indirect_reg_count);
2190 
2191 		i++;
2192 	}
2193 
2194 	/* set save/restore list size */
2195 	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2196 	list_size = list_size >> 1;
2197 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2198 		adev->gfx.rlc.reg_restore_list_size);
2199 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2200 
2201 	/* write the starting offsets to RLC scratch ram */
2202 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2203 		adev->gfx.rlc.starting_offsets_start);
2204 	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2205 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2206 		       indirect_start_offsets[i]);
2207 
2208 	/* load unique indirect regs*/
2209 	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2210 		if (unique_indirect_regs[i] != 0) {
2211 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2212 			       + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2213 			       unique_indirect_regs[i] & 0x3FFFF);
2214 
2215 			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2216 			       + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2217 			       unique_indirect_regs[i] >> 20);
2218 		}
2219 	}
2220 
2221 	kfree(register_list_format);
2222 	return 0;
2223 }
2224 
2225 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2226 {
2227 	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2228 }
2229 
2230 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2231 					     bool enable)
2232 {
2233 	uint32_t data = 0;
2234 	uint32_t default_data = 0;
2235 
2236 	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2237 	if (enable) {
2238 		/* enable GFXIP control over CGPG */
2239 		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2240 		if (default_data != data)
2241 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2242 
2243 		/* update status */
2244 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2245 		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2246 		if (default_data != data)
2247 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2248 	} else {
2249 		/* restore GFXIP control over CGPG */
2250 		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2251 		if (default_data != data)
2252 			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2253 	}
2254 }
2255 
2256 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2257 {
2258 	uint32_t data = 0;
2259 
2260 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2261 			      AMD_PG_SUPPORT_GFX_SMG |
2262 			      AMD_PG_SUPPORT_GFX_DMG)) {
2263 		/* init IDLE_POLL_COUNT = 60 */
2264 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2265 		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2266 		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2267 		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2268 
2269 		/* init RLC PG Delay */
2270 		data = 0;
2271 		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2272 		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2273 		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2274 		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2275 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2276 
2277 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2278 		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2279 		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2280 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2281 
2282 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2283 		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2284 		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2285 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2286 
2287 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2288 		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2289 
2290 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2291 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2292 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2293 
2294 		pwr_10_0_gfxip_control_over_cgpg(adev, true);
2295 	}
2296 }
2297 
2298 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2299 						bool enable)
2300 {
2301 	uint32_t data = 0;
2302 	uint32_t default_data = 0;
2303 
2304 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2305 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2306 			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2307 			     enable ? 1 : 0);
2308 	if (default_data != data)
2309 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2310 }
2311 
2312 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2313 						bool enable)
2314 {
2315 	uint32_t data = 0;
2316 	uint32_t default_data = 0;
2317 
2318 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2319 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2320 			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2321 			     enable ? 1 : 0);
2322 	if (default_data != data)
2323 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2324 }
2325 
2326 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2327 					bool enable)
2328 {
2329 	uint32_t data = 0;
2330 	uint32_t default_data = 0;
2331 
2332 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2333 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2334 			     CP_PG_DISABLE,
2335 			     enable ? 0 : 1);
2336 	if (default_data != data)
2337 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2338 }
2339 
2340 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2341 						bool enable)
2342 {
2343 	uint32_t data, default_data;
2344 
2345 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2346 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2347 			     GFX_POWER_GATING_ENABLE,
2348 			     enable ? 1 : 0);
2349 	if (default_data != data)
2350 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2351 }
2352 
2353 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2354 						bool enable)
2355 {
2356 	uint32_t data, default_data;
2357 
2358 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2359 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2360 			     GFX_PIPELINE_PG_ENABLE,
2361 			     enable ? 1 : 0);
2362 	if (default_data != data)
2363 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2364 
2365 	if (!enable)
2366 		/* read any GFX register to wake up GFX */
2367 		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2368 }
2369 
2370 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2371 						       bool enable)
2372 {
2373 	uint32_t data, default_data;
2374 
2375 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2376 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2377 			     STATIC_PER_CU_PG_ENABLE,
2378 			     enable ? 1 : 0);
2379 	if (default_data != data)
2380 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2381 }
2382 
2383 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2384 						bool enable)
2385 {
2386 	uint32_t data, default_data;
2387 
2388 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2389 	data = REG_SET_FIELD(data, RLC_PG_CNTL,
2390 			     DYN_PER_CU_PG_ENABLE,
2391 			     enable ? 1 : 0);
2392 	if (default_data != data)
2393 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2394 }
2395 
2396 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2397 {
2398 	gfx_v9_0_init_csb(adev);
2399 
2400 	/*
2401 	 * Rlc save restore list is workable since v2_1.
2402 	 * And it's needed by gfxoff feature.
2403 	 */
2404 	if (adev->gfx.rlc.is_rlc_v2_1) {
2405 		gfx_v9_1_init_rlc_save_restore_list(adev);
2406 		gfx_v9_0_enable_save_restore_machine(adev);
2407 	}
2408 
2409 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2410 			      AMD_PG_SUPPORT_GFX_SMG |
2411 			      AMD_PG_SUPPORT_GFX_DMG |
2412 			      AMD_PG_SUPPORT_CP |
2413 			      AMD_PG_SUPPORT_GDS |
2414 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
2415 		WREG32(mmRLC_JUMP_TABLE_RESTORE,
2416 		       adev->gfx.rlc.cp_table_gpu_addr >> 8);
2417 		gfx_v9_0_init_gfx_power_gating(adev);
2418 	}
2419 }
2420 
2421 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2422 {
2423 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2424 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2425 	gfx_v9_0_wait_for_rlc_serdes(adev);
2426 }
2427 
2428 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2429 {
2430 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2431 	udelay(50);
2432 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2433 	udelay(50);
2434 }
2435 
2436 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2437 {
2438 #ifdef AMDGPU_RLC_DEBUG_RETRY
2439 	u32 rlc_ucode_ver;
2440 #endif
2441 
2442 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2443 
2444 	/* on APUs such as carrizo, the CP interrupt is enabled only after the CP is initialized */
2445 	if (!(adev->flags & AMD_IS_APU))
2446 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2447 
2448 	udelay(50);
2449 
2450 #ifdef AMDGPU_RLC_DEBUG_RETRY
2451 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
2452 	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2453 	if (rlc_ucode_ver == 0x108) {
2454 		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2455 				rlc_ucode_ver, adev->gfx.rlc_fw_version);
2456 		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2457 		 * default is 0x9C4 to create a 100us interval */
2458 		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2459 		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2460 		 * to disable the page fault retry interrupts, default is
2461 		 * 0x100 (256) */
2462 		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2463 	}
2464 #endif
2465 }
2466 
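/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode dwords through
 * the auto-incrementing RLC_GPM_UCODE_ADDR/DATA window starting at
 * RLCG_UCODE_LOADING_START_ADDRESS, then write the firmware version
 * back to the ADDR register, mirroring the CP ucode loads below.
 */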
2467 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2468 {
2469 	const struct rlc_firmware_header_v2_0 *hdr;
2470 	const __le32 *fw_data;
2471 	unsigned i, fw_size;
2472 
2473 	if (!adev->gfx.rlc_fw)
2474 		return -EINVAL;
2475 
2476 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2477 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
2478 
2479 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2480 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2481 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2482 
2483 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2484 			RLCG_UCODE_LOADING_START_ADDRESS);
2485 	for (i = 0; i < fw_size; i++)
2486 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2487 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2488 
2489 	return 0;
2490 }
2491 
2492 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2493 {
2494 	int r;
2495 
2496 	if (amdgpu_sriov_vf(adev)) {
2497 		gfx_v9_0_init_csb(adev);
2498 		return 0;
2499 	}
2500 
2501 	gfx_v9_0_rlc_stop(adev);
2502 
2503 	/* disable CG */
2504 	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2505 
2506 	gfx_v9_0_rlc_reset(adev);
2507 
2508 	gfx_v9_0_init_pg(adev);
2509 
2510 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2511 		/* legacy rlc firmware loading */
2512 		r = gfx_v9_0_rlc_load_microcode(adev);
2513 		if (r)
2514 			return r;
2515 	}
2516 
2517 	if (adev->asic_type == CHIP_RAVEN ||
2518 	    adev->asic_type == CHIP_VEGA20) {
2519 		if (amdgpu_lbpw != 0)
2520 			gfx_v9_0_enable_lbpw(adev, true);
2521 		else
2522 			gfx_v9_0_enable_lbpw(adev, false);
2523 	}
2524 
2525 	gfx_v9_0_rlc_start(adev);
2526 
2527 	return 0;
2528 }
2529 
2530 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2531 {
2532 	int i;
2533 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2534 
2535 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2536 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2537 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2538 	if (!enable) {
2539 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2540 			adev->gfx.gfx_ring[i].ready = false;
2541 	}
2542 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2543 	udelay(50);
2544 }
2545 
2546 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2547 {
2548 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2549 	const struct gfx_firmware_header_v1_0 *ce_hdr;
2550 	const struct gfx_firmware_header_v1_0 *me_hdr;
2551 	const __le32 *fw_data;
2552 	unsigned i, fw_size;
2553 
2554 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2555 		return -EINVAL;
2556 
2557 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2558 		adev->gfx.pfp_fw->data;
2559 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2560 		adev->gfx.ce_fw->data;
2561 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2562 		adev->gfx.me_fw->data;
2563 
2564 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2565 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2566 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2567 
2568 	gfx_v9_0_cp_gfx_enable(adev, false);
2569 
2570 	/* PFP */
2571 	fw_data = (const __le32 *)
2572 		(adev->gfx.pfp_fw->data +
2573 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2574 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2575 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2576 	for (i = 0; i < fw_size; i++)
2577 		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2578 	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2579 
2580 	/* CE */
2581 	fw_data = (const __le32 *)
2582 		(adev->gfx.ce_fw->data +
2583 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2584 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2585 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2586 	for (i = 0; i < fw_size; i++)
2587 		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2588 	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2589 
2590 	/* ME */
2591 	fw_data = (const __le32 *)
2592 		(adev->gfx.me_fw->data +
2593 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2594 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2595 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2596 	for (i = 0; i < fw_size; i++)
2597 		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2598 	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2599 
2600 	return 0;
2601 }
2602 
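/*
 * Prime the gfx ring with the clear-state preamble: begin-clear-state,
 * a CONTEXT_CONTROL, the SET_CONTEXT_REG extents from gfx9_cs_data,
 * end-clear-state plus CLEAR_STATE, and finally the CE partition bases
 * and a VGT index-type reset.
 */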
2603 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2604 {
2605 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2606 	const struct cs_section_def *sect = NULL;
2607 	const struct cs_extent_def *ext = NULL;
2608 	int r, i, tmp;
2609 
2610 	/* init the CP */
2611 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2612 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2613 
2614 	gfx_v9_0_cp_gfx_enable(adev, true);
2615 
2616 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2617 	if (r) {
2618 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2619 		return r;
2620 	}
2621 
2622 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2623 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2624 
2625 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2626 	amdgpu_ring_write(ring, 0x80000000);
2627 	amdgpu_ring_write(ring, 0x80000000);
2628 
2629 	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2630 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2631 			if (sect->id == SECT_CONTEXT) {
2632 				amdgpu_ring_write(ring,
2633 				       PACKET3(PACKET3_SET_CONTEXT_REG,
2634 					       ext->reg_count));
2635 				amdgpu_ring_write(ring,
2636 				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2637 				for (i = 0; i < ext->reg_count; i++)
2638 					amdgpu_ring_write(ring, ext->extent[i]);
2639 			}
2640 		}
2641 	}
2642 
2643 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2644 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2645 
2646 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2647 	amdgpu_ring_write(ring, 0);
2648 
2649 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2650 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2651 	amdgpu_ring_write(ring, 0x8000);
2652 	amdgpu_ring_write(ring, 0x8000);
2653 
2654 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2655 	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2656 		(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2657 	amdgpu_ring_write(ring, tmp);
2658 	amdgpu_ring_write(ring, 0);
2659 
2660 	amdgpu_ring_commit(ring);
2661 
2662 	return 0;
2663 }
2664 
2665 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2666 {
2667 	struct amdgpu_ring *ring;
2668 	u32 tmp;
2669 	u32 rb_bufsz;
2670 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
2671 
2672 	/* Set the write pointer delay */
2673 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2674 
2675 	/* set the RB to use vmid 0 */
2676 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2677 
2678 	/* Set ring buffer size */
2679 	ring = &adev->gfx.gfx_ring[0];
2680 	rb_bufsz = order_base_2(ring->ring_size / 8);
2681 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2682 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2683 #ifdef __BIG_ENDIAN
2684 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2685 #endif
2686 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2687 
2688 	/* Initialize the ring buffer's write pointers */
2689 	ring->wptr = 0;
2690 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2691 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2692 
2693 	/* set the wb address whether it's enabled or not */
2694 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2695 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2696 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2697 
2698 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2699 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2700 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2701 
2702 	mdelay(1);
2703 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2704 
2705 	rb_addr = ring->gpu_addr >> 8;
2706 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2707 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2708 
2709 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2710 	if (ring->use_doorbell) {
2711 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2712 				    DOORBELL_OFFSET, ring->doorbell_index);
2713 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2714 				    DOORBELL_EN, 1);
2715 	} else {
2716 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2717 	}
2718 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2719 
2720 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2721 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
2722 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2723 
2724 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2725 		       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2726 
2728 	/* start the ring */
2729 	gfx_v9_0_cp_gfx_start(adev);
2730 	ring->ready = true;
2731 
2732 	return 0;
2733 }
2734 
2735 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2736 {
2737 	int i;
2738 
2739 	if (enable) {
2740 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2741 	} else {
2742 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2743 			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2744 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2745 			adev->gfx.compute_ring[i].ready = false;
2746 		adev->gfx.kiq.ring.ready = false;
2747 	}
2748 	udelay(50);
2749 }
2750 
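/*
 * Legacy (non-PSP) MEC microcode load: the ucode body is fetched by
 * the CPC instruction cache from the GTT buffer object programmed into
 * CP_CPC_IC_BASE (filled during MEC init above), so only the jump
 * table entries are written through the MEC1 UCODE_ADDR/DATA registers
 * here.
 */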
2751 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2752 {
2753 	const struct gfx_firmware_header_v1_0 *mec_hdr;
2754 	const __le32 *fw_data;
2755 	unsigned i;
2756 	u32 tmp;
2757 
2758 	if (!adev->gfx.mec_fw)
2759 		return -EINVAL;
2760 
2761 	gfx_v9_0_cp_compute_enable(adev, false);
2762 
2763 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2764 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2765 
2766 	fw_data = (const __le32 *)
2767 		(adev->gfx.mec_fw->data +
2768 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2769 	tmp = 0;
2770 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2771 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2772 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2773 
2774 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2775 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2776 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2777 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2778 
2779 	/* MEC1 */
2780 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2781 			 mec_hdr->jt_offset);
2782 	for (i = 0; i < mec_hdr->jt_size; i++)
2783 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2784 			le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2785 
2786 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2787 			adev->gfx.mec_fw_version);
2788 	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2789 
2790 	return 0;
2791 }
2792 
2793 /* KIQ functions */
2794 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2795 {
2796 	uint32_t tmp;
2797 	struct amdgpu_device *adev = ring->adev;
2798 
2799 	/* tell RLC which queue is the KIQ */
2800 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2801 	tmp &= 0xffffff00;
2802 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2803 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2804 	tmp |= 0x80;
2805 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2806 }
2807 
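/*
 * Map all KCQs through the KIQ: one SET_RESOURCES packet (8 dwords)
 * hands the enabled-queue mask to the scheduler, then a 7-dword
 * MAP_QUEUES packet per compute ring supplies its doorbell, MQD and
 * wptr addresses, which is exactly the (7 * num_compute_rings) + 8
 * dwords reserved below.
 */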
2808 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2809 {
2810 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2811 	uint64_t queue_mask = 0;
2812 	int r, i;
2813 
2814 	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2815 		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2816 			continue;
2817 
2818 		/* This situation may be hit in the future if a new HW
2819 		 * generation exposes more than 64 queues. If so, the
2820 		 * definition of queue_mask needs updating */
2821 		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2822 			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2823 			break;
2824 		}
2825 
2826 		queue_mask |= (1ull << i);
2827 	}
2828 
2829 	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
2830 	if (r) {
2831 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2832 		return r;
2833 	}
2834 
2835 	/* set resources */
2836 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2837 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2838 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
2839 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
2840 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
2841 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
2842 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
2843 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
2844 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
2845 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2846 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2847 		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2848 		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2849 
2850 		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2851 		/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2852 		amdgpu_ring_write(kiq_ring,
2853 				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2854 				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2855 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2856 				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2857 				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2858 				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2859 				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2860 				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2861 				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2862 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2863 		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2864 		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2865 		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2866 		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2867 	}
2868 
2869 	r = amdgpu_ring_test_ring(kiq_ring);
2870 	if (r) {
2871 		DRM_ERROR("KCQ enable failed\n");
2872 		kiq_ring->ready = false;
2873 	}
2874 
2875 	return r;
2876 }
2877 
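/*
 * Fill the v9 MQD (memory queue descriptor) for this ring. The MQD is
 * the CP's persistent image of the CP_HQD_* register state; it is
 * either committed to the live registers directly (KIQ, below) or
 * handed to the KIQ via MAP_QUEUES for hardware-scheduled queues.
 */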
2878 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2879 {
2880 	struct amdgpu_device *adev = ring->adev;
2881 	struct v9_mqd *mqd = ring->mqd_ptr;
2882 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2883 	uint32_t tmp;
2884 
2885 	mqd->header = 0xC0310800;
2886 	mqd->compute_pipelinestat_enable = 0x00000001;
2887 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2888 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2889 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2890 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2891 	mqd->compute_misc_reserved = 0x00000003;
2892 
2893 	mqd->dynamic_cu_mask_addr_lo =
2894 		lower_32_bits(ring->mqd_gpu_addr
2895 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2896 	mqd->dynamic_cu_mask_addr_hi =
2897 		upper_32_bits(ring->mqd_gpu_addr
2898 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2899 
2900 	eop_base_addr = ring->eop_gpu_addr >> 8;
2901 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2902 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2903 
2904 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2905 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2906 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2907 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2908 
2909 	mqd->cp_hqd_eop_control = tmp;
2910 
2911 	/* enable doorbell? */
2912 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2913 
2914 	if (ring->use_doorbell) {
2915 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2916 				    DOORBELL_OFFSET, ring->doorbell_index);
2917 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2918 				    DOORBELL_EN, 1);
2919 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2920 				    DOORBELL_SOURCE, 0);
2921 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2922 				    DOORBELL_HIT, 0);
2923 	} else {
2924 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2925 					 DOORBELL_EN, 0);
2926 	}
2927 
2928 	mqd->cp_hqd_pq_doorbell_control = tmp;
2929 
2930 	/* disable the queue if it's active */
2931 	ring->wptr = 0;
2932 	mqd->cp_hqd_dequeue_request = 0;
2933 	mqd->cp_hqd_pq_rptr = 0;
2934 	mqd->cp_hqd_pq_wptr_lo = 0;
2935 	mqd->cp_hqd_pq_wptr_hi = 0;
2936 
2937 	/* set the pointer to the MQD */
2938 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2939 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2940 
2941 	/* set MQD vmid to 0 */
2942 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2943 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2944 	mqd->cp_mqd_control = tmp;
2945 
2946 	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2947 	hqd_gpu_addr = ring->gpu_addr >> 8;
2948 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2949 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2950 
2951 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2952 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2953 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2954 			    (order_base_2(ring->ring_size / 4) - 1));
2955 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2956 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2957 #ifdef __BIG_ENDIAN
2958 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2959 #endif
2960 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2961 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2962 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2963 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2964 	mqd->cp_hqd_pq_control = tmp;
2965 
2966 	/* set the wb address whether it's enabled or not */
2967 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2968 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2969 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2970 		upper_32_bits(wb_gpu_addr) & 0xffff;
2971 
2972 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2973 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2974 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2975 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2976 
2977 	tmp = 0;
2978 	/* enable the doorbell if requested */
2979 	if (ring->use_doorbell) {
2980 		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2981 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2982 				DOORBELL_OFFSET, ring->doorbell_index);
2983 
2984 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2985 					 DOORBELL_EN, 1);
2986 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2987 					 DOORBELL_SOURCE, 0);
2988 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2989 					 DOORBELL_HIT, 0);
2990 	}
2991 
2992 	mqd->cp_hqd_pq_doorbell_control = tmp;
2993 
2994 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2995 	ring->wptr = 0;
2996 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2997 
2998 	/* set the vmid for the queue */
2999 	mqd->cp_hqd_vmid = 0;
3000 
3001 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3002 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3003 	mqd->cp_hqd_persistent_state = tmp;
3004 
3005 	/* set MIN_IB_AVAIL_SIZE */
3006 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3007 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3008 	mqd->cp_hqd_ib_control = tmp;
3009 
3010 	/* activate the queue */
3011 	mqd->cp_hqd_active = 1;
3012 
3013 	return 0;
3014 }
3015 
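/*
 * Program the HQD registers directly from the MQD image. The caller
 * must hold adev->srbm_mutex with this ring's me/pipe/queue selected
 * through soc15_grbm_select(), since the CP_HQD_* registers are banked
 * per queue.
 */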
3016 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3017 {
3018 	struct amdgpu_device *adev = ring->adev;
3019 	struct v9_mqd *mqd = ring->mqd_ptr;
3020 	int j;
3021 
3022 	/* disable wptr polling */
3023 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3024 
3025 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3026 	       mqd->cp_hqd_eop_base_addr_lo);
3027 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3028 	       mqd->cp_hqd_eop_base_addr_hi);
3029 
3030 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3031 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3032 	       mqd->cp_hqd_eop_control);
3033 
3034 	/* enable doorbell? */
3035 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3036 	       mqd->cp_hqd_pq_doorbell_control);
3037 
3038 	/* disable the queue if it's active */
3039 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3040 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3041 		for (j = 0; j < adev->usec_timeout; j++) {
3042 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3043 				break;
3044 			udelay(1);
3045 		}
3046 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3047 		       mqd->cp_hqd_dequeue_request);
3048 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3049 		       mqd->cp_hqd_pq_rptr);
3050 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3051 		       mqd->cp_hqd_pq_wptr_lo);
3052 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3053 		       mqd->cp_hqd_pq_wptr_hi);
3054 	}
3055 
3056 	/* set the pointer to the MQD */
3057 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3058 	       mqd->cp_mqd_base_addr_lo);
3059 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3060 	       mqd->cp_mqd_base_addr_hi);
3061 
3062 	/* set MQD vmid to 0 */
3063 	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3064 	       mqd->cp_mqd_control);
3065 
3066 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
3067 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3068 	       mqd->cp_hqd_pq_base_lo);
3069 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3070 	       mqd->cp_hqd_pq_base_hi);
3071 
3072 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3073 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3074 	       mqd->cp_hqd_pq_control);
3075 
3076 	/* set the wb address whether it's enabled or not */
3077 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3078 				mqd->cp_hqd_pq_rptr_report_addr_lo);
3079 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3080 				mqd->cp_hqd_pq_rptr_report_addr_hi);
3081 
3082 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3083 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3084 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3085 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3086 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3087 
3088 	/* enable the doorbell if requested */
3089 	if (ring->use_doorbell) {
3090 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
					(AMDGPU_DOORBELL64_KIQ * 2) << 2);
3092 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3093 					(AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
3094 	}
3095 
3096 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3097 	       mqd->cp_hqd_pq_doorbell_control);
3098 
3099 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3100 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3101 	       mqd->cp_hqd_pq_wptr_lo);
3102 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3103 	       mqd->cp_hqd_pq_wptr_hi);
3104 
3105 	/* set the vmid for the queue */
3106 	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3107 
3108 	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3109 	       mqd->cp_hqd_persistent_state);
3110 
3111 	/* activate the queue */
3112 	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3113 	       mqd->cp_hqd_active);
3114 
3115 	if (ring->use_doorbell)
3116 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3117 
3118 	return 0;
3119 }
3120 
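/*
 * Quiesce and tear down the KIQ HQD: request a dequeue, wait for the
 * queue to go inactive (deactivating it by hand on timeout), then clear
 * the doorbell and ring-pointer state. Same SRBM-select requirement as
 * the init path above.
 */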
3121 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3122 {
3123 	struct amdgpu_device *adev = ring->adev;
3124 	int j;
3125 
3126 	/* disable the queue if it's active */
3127 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3128 
3129 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3130 
3131 		for (j = 0; j < adev->usec_timeout; j++) {
3132 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3133 				break;
3134 			udelay(1);
3135 		}
3136 
		/* compare against the same bound the wait loop used */
		if (j == adev->usec_timeout) {
3138 			DRM_DEBUG("KIQ dequeue request failed.\n");
3139 
3140 			/* Manual disable if dequeue request times out */
3141 			WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3142 		}
3143 
3144 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3145 		      0);
3146 	}
3147 
3148 	WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3149 	WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3150 	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3151 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3152 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3153 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3154 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3155 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3156 
3157 	return 0;
3158 }
3159 
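/*
 * KIQ queue bring-up. On a normal init the MQD is built from scratch
 * and a backup copy is stashed; on GPU reset the backup is restored
 * verbatim and only the ring buffer is cleared, so the queue resumes
 * with its previous configuration.
 */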
3160 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3161 {
3162 	struct amdgpu_device *adev = ring->adev;
3163 	struct v9_mqd *mqd = ring->mqd_ptr;
3164 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3165 
3166 	gfx_v9_0_kiq_setting(ring);
3167 
3168 	if (adev->in_gpu_reset) { /* for GPU_RESET case */
3169 		/* reset MQD to a clean status */
3170 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3171 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3172 
3173 		/* reset ring buffer */
3174 		ring->wptr = 0;
3175 		amdgpu_ring_clear_ring(ring);
3176 
3177 		mutex_lock(&adev->srbm_mutex);
3178 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3179 		gfx_v9_0_kiq_init_register(ring);
3180 		soc15_grbm_select(adev, 0, 0, 0, 0);
3181 		mutex_unlock(&adev->srbm_mutex);
3182 	} else {
3183 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3184 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3185 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3186 		mutex_lock(&adev->srbm_mutex);
3187 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3188 		gfx_v9_0_mqd_init(ring);
3189 		gfx_v9_0_kiq_init_register(ring);
3190 		soc15_grbm_select(adev, 0, 0, 0, 0);
3191 		mutex_unlock(&adev->srbm_mutex);
3192 
3193 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3194 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3195 	}
3196 
3197 	return 0;
3198 }
3199 
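/*
 * KCQ variant of the above. Note that only the MQD is initialized here;
 * the actual MAP_QUEUES request is batched for all compute rings in
 * gfx_v9_0_kiq_kcq_enable(), issued from gfx_v9_0_kcq_resume().
 */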
3200 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3201 {
3202 	struct amdgpu_device *adev = ring->adev;
3203 	struct v9_mqd *mqd = ring->mqd_ptr;
3204 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3205 
3206 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3207 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3208 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3209 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3210 		mutex_lock(&adev->srbm_mutex);
3211 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3212 		gfx_v9_0_mqd_init(ring);
3213 		soc15_grbm_select(adev, 0, 0, 0, 0);
3214 		mutex_unlock(&adev->srbm_mutex);
3215 
3216 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3217 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3218 	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3219 		/* reset MQD to a clean status */
3220 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3221 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3222 
3223 		/* reset ring buffer */
3224 		ring->wptr = 0;
3225 		amdgpu_ring_clear_ring(ring);
3226 	} else {
3227 		amdgpu_ring_clear_ring(ring);
3228 	}
3229 
3230 	return 0;
3231 }
3232 
3233 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3234 {
3235 	struct amdgpu_ring *ring;
3236 	int r;
3237 
3238 	ring = &adev->gfx.kiq.ring;
3239 
3240 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3241 	if (unlikely(r != 0))
3242 		return r;
3243 
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		/* don't leak the reservation on the error path */
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}
3247 
3248 	gfx_v9_0_kiq_init_queue(ring);
3249 	amdgpu_bo_kunmap(ring->mqd_obj);
3250 	ring->mqd_ptr = NULL;
3251 	amdgpu_bo_unreserve(ring->mqd_obj);
3252 	ring->ready = true;
3253 	return 0;
3254 }
3255 
3256 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3257 {
3258 	struct amdgpu_ring *ring = NULL;
3259 	int r = 0, i;
3260 
3261 	gfx_v9_0_cp_compute_enable(adev, true);
3262 
3263 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3264 		ring = &adev->gfx.compute_ring[i];
3265 
3266 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3267 		if (unlikely(r != 0))
3268 			goto done;
3269 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3270 		if (!r) {
3271 			r = gfx_v9_0_kcq_init_queue(ring);
3272 			amdgpu_bo_kunmap(ring->mqd_obj);
3273 			ring->mqd_ptr = NULL;
3274 		}
3275 		amdgpu_bo_unreserve(ring->mqd_obj);
3276 		if (r)
3277 			goto done;
3278 	}
3279 
3280 	r = gfx_v9_0_kiq_kcq_enable(adev);
3281 done:
3282 	return r;
3283 }
3284 
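/*
 * CP bring-up order matters: load microcode first (legacy path only;
 * with PSP loading the firmware is already resident), then the KIQ so
 * it can service MAP_QUEUES requests, then the GFX ring, and finally
 * the compute queues, before ring-testing everything.
 */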
3285 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3286 {
3287 	int r, i;
3288 	struct amdgpu_ring *ring;
3289 
3290 	if (!(adev->flags & AMD_IS_APU))
3291 		gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3292 
3293 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3294 		/* legacy firmware loading */
3295 		r = gfx_v9_0_cp_gfx_load_microcode(adev);
3296 		if (r)
3297 			return r;
3298 
3299 		r = gfx_v9_0_cp_compute_load_microcode(adev);
3300 		if (r)
3301 			return r;
3302 	}
3303 
3304 	r = gfx_v9_0_kiq_resume(adev);
3305 	if (r)
3306 		return r;
3307 
3308 	r = gfx_v9_0_cp_gfx_resume(adev);
3309 	if (r)
3310 		return r;
3311 
3312 	r = gfx_v9_0_kcq_resume(adev);
3313 	if (r)
3314 		return r;
3315 
3316 	ring = &adev->gfx.gfx_ring[0];
3317 	r = amdgpu_ring_test_ring(ring);
3318 	if (r) {
3319 		ring->ready = false;
3320 		return r;
3321 	}
3322 
3323 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3324 		ring = &adev->gfx.compute_ring[i];
3325 
3326 		ring->ready = true;
3327 		r = amdgpu_ring_test_ring(ring);
3328 		if (r)
3329 			ring->ready = false;
3330 	}
3331 
3332 	gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3333 
3334 	return 0;
3335 }
3336 
3337 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3338 {
3339 	gfx_v9_0_cp_gfx_enable(adev, enable);
3340 	gfx_v9_0_cp_compute_enable(adev, enable);
3341 }
3342 
3343 static int gfx_v9_0_hw_init(void *handle)
3344 {
3345 	int r;
3346 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3347 
3348 	gfx_v9_0_init_golden_registers(adev);
3349 
3350 	gfx_v9_0_constants_init(adev);
3351 
3352 	r = gfx_v9_0_csb_vram_pin(adev);
3353 	if (r)
3354 		return r;
3355 
3356 	r = gfx_v9_0_rlc_resume(adev);
3357 	if (r)
3358 		return r;
3359 
3360 	r = gfx_v9_0_cp_resume(adev);
3361 	if (r)
3362 		return r;
3363 
	r = gfx_v9_0_ngg_en(adev);
	if (r)
		return r;

	return 0;
3369 }
3370 
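/*
 * Unmap all KCQs through the KIQ: one 6-dword UNMAP_QUEUES packet per
 * compute ring (hence the 6 * num_compute_rings allocation below),
 * using the RESET_QUEUES action so the HQDs are reset as well.
 */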
3371 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3372 {
3373 	int r, i;
3374 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3375 
3376 	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}
3379 
3380 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3381 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3382 
3383 		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3384 		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3385 						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3386 						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3387 						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3388 						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3389 		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3390 		amdgpu_ring_write(kiq_ring, 0);
3391 		amdgpu_ring_write(kiq_ring, 0);
3392 		amdgpu_ring_write(kiq_ring, 0);
3393 	}
3394 	r = amdgpu_ring_test_ring(kiq_ring);
3395 	if (r)
3396 		DRM_ERROR("KCQ disable failed\n");
3397 
3398 	return r;
3399 }
3400 
3401 static int gfx_v9_0_hw_fini(void *handle)
3402 {
3403 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3404 
3405 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3406 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3407 
	/* disable the KCQs so the CPC stops touching memory that is about
	 * to become invalid
	 */
3409 	gfx_v9_0_kcq_disable(adev);
3410 
3411 	if (amdgpu_sriov_vf(adev)) {
3412 		gfx_v9_0_cp_gfx_enable(adev, false);
		/* Must disable WPTR polling for SRIOV once the hw is
		 * finished; otherwise the CPC engine may keep fetching a WB
		 * address that is no longer valid after sw teardown and
		 * trigger DMAR read errors on the hypervisor side.
		 */
3418 		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3419 		return 0;
3420 	}
3421 
	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * from the driver; otherwise the KIQ hangs when it is bound back.
	 */
3425 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3426 		mutex_lock(&adev->srbm_mutex);
3427 		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3428 				adev->gfx.kiq.ring.pipe,
3429 				adev->gfx.kiq.ring.queue, 0);
3430 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3431 		soc15_grbm_select(adev, 0, 0, 0, 0);
3432 		mutex_unlock(&adev->srbm_mutex);
3433 	}
3434 
3435 	gfx_v9_0_cp_enable(adev, false);
3436 	gfx_v9_0_rlc_stop(adev);
3437 
3438 	gfx_v9_0_csb_vram_unpin(adev);
3439 
3440 	return 0;
3441 }
3442 
3443 static int gfx_v9_0_suspend(void *handle)
3444 {
3445 	return gfx_v9_0_hw_fini(handle);
3446 }
3447 
3448 static int gfx_v9_0_resume(void *handle)
3449 {
3450 	return gfx_v9_0_hw_init(handle);
3451 }
3452 
3453 static bool gfx_v9_0_is_idle(void *handle)
3454 {
3455 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3456 
	return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
			      GRBM_STATUS, GUI_ACTIVE);
3462 }
3463 
3464 static int gfx_v9_0_wait_for_idle(void *handle)
3465 {
3466 	unsigned i;
3467 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3468 
3469 	for (i = 0; i < adev->usec_timeout; i++) {
3470 		if (gfx_v9_0_is_idle(handle))
3471 			return 0;
3472 		udelay(1);
3473 	}
3474 	return -ETIMEDOUT;
3475 }
3476 
3477 static int gfx_v9_0_soft_reset(void *handle)
3478 {
3479 	u32 grbm_soft_reset = 0;
3480 	u32 tmp;
3481 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3482 
3483 	/* GRBM_STATUS */
3484 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3485 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3486 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3487 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3488 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3489 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3490 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3491 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3492 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3493 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3494 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3495 	}
3496 
3497 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3498 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3499 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3500 	}
3501 
3502 	/* GRBM_STATUS2 */
3503 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3504 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3505 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3506 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3507 
	if (grbm_soft_reset) {
		/* stop the rlc */
		gfx_v9_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		gfx_v9_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v9_0_cp_compute_enable(adev, false);

		/* assert the soft reset bits, then release them */
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3532 
3533 		/* Wait a little for things to settle down */
3534 		udelay(50);
3535 	}
3536 	return 0;
3537 }
3538 
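/*
 * Read the free-running 64-bit GPU clock. Judging by the register
 * names, the write to mmRLC_CAPTURE_GPU_CLOCK_COUNT latches the counter
 * into the LSB/MSB pair so the two 32-bit reads are coherent; the mutex
 * serializes concurrent captures.
 */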
3539 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3540 {
3541 	uint64_t clock;
3542 
3543 	mutex_lock(&adev->gfx.gpu_clock_mutex);
3544 	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3545 	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3546 		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3547 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
3548 	return clock;
3549 }
3550 
3551 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3552 					  uint32_t vmid,
3553 					  uint32_t gds_base, uint32_t gds_size,
3554 					  uint32_t gws_base, uint32_t gws_size,
3555 					  uint32_t oa_base, uint32_t oa_size)
3556 {
3557 	struct amdgpu_device *adev = ring->adev;
3558 
3559 	/* GDS Base */
3560 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3561 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3562 				   gds_base);
3563 
3564 	/* GDS Size */
3565 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3566 				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3567 				   gds_size);
3568 
3569 	/* GWS */
3570 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3571 				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3572 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3573 
	/* OA: the expression below builds a mask of oa_size consecutive
	 * bits starting at bit oa_base
	 */
3575 	gfx_v9_0_write_data_to_reg(ring, 0, false,
3576 				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3577 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
3578 }
3579 
3580 static int gfx_v9_0_early_init(void *handle)
3581 {
3582 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3583 
3584 	adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3585 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3586 	gfx_v9_0_set_ring_funcs(adev);
3587 	gfx_v9_0_set_irq_funcs(adev);
3588 	gfx_v9_0_set_gds_init(adev);
3589 	gfx_v9_0_set_rlc_funcs(adev);
3590 
3591 	return 0;
3592 }
3593 
3594 static int gfx_v9_0_late_init(void *handle)
3595 {
3596 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3597 	int r;
3598 
3599 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3600 	if (r)
3601 		return r;
3602 
3603 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3604 	if (r)
3605 		return r;
3606 
3607 	return 0;
3608 }
3609 
3610 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3611 {
3612 	uint32_t rlc_setting, data;
3613 	unsigned i;
3614 
3615 	if (adev->gfx.rlc.in_safe_mode)
3616 		return;
3617 
3618 	/* if RLC is not enabled, do nothing */
3619 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3620 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3621 		return;
3622 
3623 	if (adev->cg_flags &
3624 	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3625 	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3626 		data = RLC_SAFE_MODE__CMD_MASK;
3627 		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3628 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3629 
3630 		/* wait for RLC_SAFE_MODE */
3631 		for (i = 0; i < adev->usec_timeout; i++) {
3632 			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3633 				break;
3634 			udelay(1);
3635 		}
3636 		adev->gfx.rlc.in_safe_mode = true;
3637 	}
3638 }
3639 
3640 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3641 {
3642 	uint32_t rlc_setting, data;
3643 
3644 	if (!adev->gfx.rlc.in_safe_mode)
3645 		return;
3646 
3647 	/* if RLC is not enabled, do nothing */
3648 	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3649 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3650 		return;
3651 
3652 	if (adev->cg_flags &
3653 	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3654 		/*
3655 		 * Try to exit safe mode only if it is already in safe
3656 		 * mode.
3657 		 */
3658 		data = RLC_SAFE_MODE__CMD_MASK;
3659 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3660 		adev->gfx.rlc.in_safe_mode = false;
3661 	}
3662 }
3663 
3664 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3665 						bool enable)
3666 {
3667 	gfx_v9_0_enter_rlc_safe_mode(adev);
3668 
3669 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3670 		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3671 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3672 			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3673 	} else {
3674 		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3675 		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3676 	}
3677 
3678 	gfx_v9_0_exit_rlc_safe_mode(adev);
3679 }
3680 
3681 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3682 						bool enable)
3683 {
	/* TODO: double-check whether this needs to run under RLC safe mode */
3685 	/* gfx_v9_0_enter_rlc_safe_mode(adev); */
3686 
3687 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3688 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3689 	else
3690 		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3691 
3692 	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3693 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3694 	else
3695 		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3696 
3697 	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
3698 }
3699 
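/*
 * The clock-gating helpers below share one pattern: snapshot a register
 * into both 'def' and 'data', adjust 'data', and write back only when
 * the value actually changed, avoiding redundant register traffic.
 */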
3700 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3701 						      bool enable)
3702 {
3703 	uint32_t data, def;
3704 
3705 	/* It is disabled by HW by default */
3706 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3707 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
3708 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3709 
3710 		if (adev->asic_type != CHIP_VEGA12)
3711 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3712 
3713 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3714 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3715 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3716 
3717 		/* only for Vega10 & Raven1 */
3718 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3719 
3720 		if (def != data)
3721 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3722 
3723 		/* MGLS is a global flag to control all MGLS in GFX */
3724 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3725 			/* 2 - RLC memory Light sleep */
3726 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3727 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3728 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3729 				if (def != data)
3730 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3731 			}
3732 			/* 3 - CP memory Light sleep */
3733 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3734 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3735 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3736 				if (def != data)
3737 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3738 			}
3739 		}
3740 	} else {
3741 		/* 1 - MGCG_OVERRIDE */
3742 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3743 
3744 		if (adev->asic_type != CHIP_VEGA12)
3745 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3746 
3747 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3748 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3749 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3750 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3751 
3752 		if (def != data)
3753 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3754 
3755 		/* 2 - disable MGLS in RLC */
3756 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3757 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3758 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3759 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3760 		}
3761 
3762 		/* 3 - disable MGLS in CP */
3763 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3764 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3765 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3766 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3767 		}
3768 	}
3769 }
3770 
3771 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3772 					   bool enable)
3773 {
3774 	uint32_t data, def;
3775 
3776 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
3777 
3778 	/* Enable 3D CGCG/CGLS */
3779 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3780 		/* write cmd to clear cgcg/cgls ov */
3781 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3782 		/* unset CGCG override */
3783 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3784 		/* update CGCG and CGLS override bits */
3785 		if (def != data)
3786 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3787 
3788 		/* enable 3Dcgcg FSM(0x0000363f) */
3789 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3790 
3791 		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3792 			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3793 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3794 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3795 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3796 		if (def != data)
3797 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3798 
3799 		/* set IDLE_POLL_COUNT(0x00900100) */
3800 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3801 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3802 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3803 		if (def != data)
3804 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3805 	} else {
3806 		/* Disable CGCG/CGLS */
3807 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3808 		/* disable cgcg, cgls should be disabled */
3809 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3810 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3811 		/* disable cgcg and cgls in FSM */
3812 		if (def != data)
3813 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3814 	}
3815 
3816 	adev->gfx.rlc.funcs->exit_safe_mode(adev);
3817 }
3818 
3819 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3820 						      bool enable)
3821 {
3822 	uint32_t def, data;
3823 
3824 	adev->gfx.rlc.funcs->enter_safe_mode(adev);
3825 
3826 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3827 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3828 		/* unset CGCG override */
3829 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3830 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3831 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3832 		else
3833 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3834 		/* update CGCG and CGLS override bits */
3835 		if (def != data)
3836 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3837 
3838 		/* enable cgcg FSM(0x0000363F) */
3839 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3840 
3841 		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3842 			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3843 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3844 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3845 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3846 		if (def != data)
3847 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3848 
3849 		/* set IDLE_POLL_COUNT(0x00900100) */
3850 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3851 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3852 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3853 		if (def != data)
3854 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3855 	} else {
3856 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3857 		/* reset CGCG/CGLS bits */
3858 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3859 		/* disable cgcg and cgls in FSM */
3860 		if (def != data)
3861 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3862 	}
3863 
3864 	adev->gfx.rlc.funcs->exit_safe_mode(adev);
3865 }
3866 
3867 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3868 					    bool enable)
3869 {
3870 	if (enable) {
3871 		/* CGCG/CGLS should be enabled after MGCG/MGLS
3872 		 * ===  MGCG + MGLS ===
3873 		 */
3874 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3875 		/* ===  CGCG /CGLS for GFX 3D Only === */
3876 		gfx_v9_0_update_3d_clock_gating(adev, enable);
3877 		/* ===  CGCG + CGLS === */
3878 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3879 	} else {
3880 		/* CGCG/CGLS should be disabled before MGCG/MGLS
3881 		 * ===  CGCG + CGLS ===
3882 		 */
3883 		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3884 		/* ===  CGCG /CGLS for GFX 3D Only === */
3885 		gfx_v9_0_update_3d_clock_gating(adev, enable);
3886 		/* ===  MGCG + MGLS === */
3887 		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3888 	}
3889 	return 0;
3890 }
3891 
3892 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3893 	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3894 	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3895 };
3896 
3897 static int gfx_v9_0_set_powergating_state(void *handle,
3898 					  enum amd_powergating_state state)
3899 {
3900 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);
3902 
3903 	switch (adev->asic_type) {
3904 	case CHIP_RAVEN:
3905 		if (!enable) {
3906 			amdgpu_gfx_off_ctrl(adev, false);
3907 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3908 		}
3909 		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3910 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3911 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3912 		} else {
3913 			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3914 			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3915 		}
3916 
3917 		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3918 			gfx_v9_0_enable_cp_power_gating(adev, true);
3919 		else
3920 			gfx_v9_0_enable_cp_power_gating(adev, false);
3921 
3922 		/* update gfx cgpg state */
3923 		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3924 
3925 		/* update mgcg state */
3926 		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3927 
3928 		if (enable)
3929 			amdgpu_gfx_off_ctrl(adev, true);
3930 		break;
3931 	case CHIP_VEGA12:
3932 		if (!enable) {
3933 			amdgpu_gfx_off_ctrl(adev, false);
3934 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
3935 		} else {
3936 			amdgpu_gfx_off_ctrl(adev, true);
3937 		}
3938 		break;
3939 	default:
3940 		break;
3941 	}
3942 
3943 	return 0;
3944 }
3945 
3946 static int gfx_v9_0_set_clockgating_state(void *handle,
3947 					  enum amd_clockgating_state state)
3948 {
3949 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3950 
3951 	if (amdgpu_sriov_vf(adev))
3952 		return 0;
3953 
3954 	switch (adev->asic_type) {
3955 	case CHIP_VEGA10:
3956 	case CHIP_VEGA12:
3957 	case CHIP_VEGA20:
3958 	case CHIP_RAVEN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
3961 		break;
3962 	default:
3963 		break;
3964 	}
3965 	return 0;
3966 }
3967 
3968 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3969 {
3970 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3971 	int data;
3972 
3973 	if (amdgpu_sriov_vf(adev))
3974 		*flags = 0;
3975 
3976 	/* AMD_CG_SUPPORT_GFX_MGCG */
3977 	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3978 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3979 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
3980 
3981 	/* AMD_CG_SUPPORT_GFX_CGCG */
3982 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3983 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3984 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
3985 
3986 	/* AMD_CG_SUPPORT_GFX_CGLS */
3987 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3988 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
3989 
3990 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
3991 	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3992 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3993 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3994 
3995 	/* AMD_CG_SUPPORT_GFX_CP_LS */
3996 	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3997 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3998 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3999 
4000 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
4001 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4002 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4003 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4004 
4005 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
4006 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4007 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4008 }
4009 
4010 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4011 {
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
4013 }
4014 
4015 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4016 {
4017 	struct amdgpu_device *adev = ring->adev;
4018 	u64 wptr;
4019 
4020 	/* XXX check if swapping is necessary on BE */
4021 	if (ring->use_doorbell) {
4022 		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4023 	} else {
4024 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4025 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4026 	}
4027 
4028 	return wptr;
4029 }
4030 
4031 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4032 {
4033 	struct amdgpu_device *adev = ring->adev;
4034 
4035 	if (ring->use_doorbell) {
4036 		/* XXX check if swapping is necessary on BE */
4037 		atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4038 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4039 	} else {
4040 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4041 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4042 	}
4043 }
4044 
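/*
 * Emit an HDP flush request and wait for its completion bit.
 * ref_and_mask selects the per-engine "done" bit in the NBIO HDP flush
 * registers: cp0 for the GFX ring, and the MEC1/MEC2 base bits shifted
 * by the pipe index for compute rings.
 */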
4045 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4046 {
4047 	struct amdgpu_device *adev = ring->adev;
4048 	u32 ref_and_mask, reg_mem_engine;
4049 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
4050 
4051 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4052 		switch (ring->me) {
4053 		case 1:
4054 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4055 			break;
4056 		case 2:
4057 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4058 			break;
4059 		default:
4060 			return;
4061 		}
4062 		reg_mem_engine = 0;
4063 	} else {
4064 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4065 		reg_mem_engine = 1; /* pfp */
4066 	}
4067 
4068 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4069 			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
4070 			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
4071 			      ref_and_mask, ref_and_mask, 0x20);
4072 }
4073 
4074 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4075                                       struct amdgpu_ib *ib,
4076                                       unsigned vmid, bool ctx_switch)
4077 {
4078 	u32 header, control = 0;
4079 
4080 	if (ib->flags & AMDGPU_IB_FLAG_CE)
4081 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4082 	else
4083 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4084 
4085 	control |= ib->length_dw | (vmid << 24);
4086 
4087 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4088 		control |= INDIRECT_BUFFER_PRE_ENB(1);
4089 
4090 		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4091 			gfx_v9_0_ring_emit_de_meta(ring);
4092 	}
4093 
4094 	amdgpu_ring_write(ring, header);
4095 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4096 	amdgpu_ring_write(ring,
4097 #ifdef __BIG_ENDIAN
4098 		(2 << 0) |
4099 #endif
4100 		lower_32_bits(ib->gpu_addr));
4101 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4102 	amdgpu_ring_write(ring, control);
4103 }
4104 
4105 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4106                                           struct amdgpu_ib *ib,
4107                                           unsigned vmid, bool ctx_switch)
4108 {
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
				(2 << 0) |
#endif
				lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
4120 }
4121 
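/*
 * Emit a fence via RELEASE_MEM: flush/invalidate the selected caches at
 * end-of-pipe, write the sequence number as 32- or 64-bit data
 * (DATA_SEL), and optionally raise an interrupt (INT_SEL), all steered
 * by 'flags'.
 */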
4122 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4123 				     u64 seq, unsigned flags)
4124 {
4125 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4126 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4127 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
4128 
4129 	/* RELEASE_MEM - flush caches, send int */
4130 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4131 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
4132 					       EOP_TC_NC_ACTION_EN) :
4133 					      (EOP_TCL1_ACTION_EN |
4134 					       EOP_TC_ACTION_EN |
4135 					       EOP_TC_WB_ACTION_EN |
4136 					       EOP_TC_MD_ACTION_EN)) |
4137 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4138 				 EVENT_INDEX(5)));
4139 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4140 
	/*
	 * The address must be Qword aligned for a 64-bit write, or Dword
	 * aligned when only the low 32 bits are written (data high
	 * discarded).
	 */
4145 	if (write64bit)
4146 		BUG_ON(addr & 0x7);
4147 	else
4148 		BUG_ON(addr & 0x3);
4149 	amdgpu_ring_write(ring, lower_32_bits(addr));
4150 	amdgpu_ring_write(ring, upper_32_bits(addr));
4151 	amdgpu_ring_write(ring, lower_32_bits(seq));
4152 	amdgpu_ring_write(ring, upper_32_bits(seq));
4153 	amdgpu_ring_write(ring, 0);
4154 }
4155 
4156 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4157 {
4158 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4159 	uint32_t seq = ring->fence_drv.sync_seq;
4160 	uint64_t addr = ring->fence_drv.gpu_addr;
4161 
4162 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
4163 			      lower_32_bits(addr), upper_32_bits(addr),
4164 			      seq, 0xffffffff, 4);
4165 }
4166 
4167 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4168 					unsigned vmid, uint64_t pd_addr)
4169 {
4170 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4171 
4172 	/* compute doesn't have PFP */
4173 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4174 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4175 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4176 		amdgpu_ring_write(ring, 0x0);
4177 	}
4178 }
4179 
4180 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4181 {
4182 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
4183 }
4184 
4185 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4186 {
4187 	u64 wptr;
4188 
4189 	/* XXX check if swapping is necessary on BE */
4190 	if (ring->use_doorbell)
4191 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4192 	else
4193 		BUG();
4194 	return wptr;
4195 }
4196 
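/*
 * Throttle or restore a pipe by programming SPI_WCL_PIPE_PERCENT_*:
 * the full VALUE mask while a high-priority queue holds a reservation,
 * a token 0x1 otherwise. The registers for the compute MEs follow the
 * two first-ME entries (GFX and HP3D), hence the pipe_num adjustment.
 */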
4197 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4198 					   bool acquire)
4199 {
4200 	struct amdgpu_device *adev = ring->adev;
4201 	int pipe_num, tmp, reg;
4202 	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4203 
4204 	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4205 
4206 	/* first me only has 2 entries, GFX and HP3D */
4207 	if (ring->me > 0)
4208 		pipe_num -= 2;
4209 
4210 	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4211 	tmp = RREG32(reg);
4212 	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4213 	WREG32(reg, tmp);
4214 }
4215 
4216 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4217 					    struct amdgpu_ring *ring,
4218 					    bool acquire)
4219 {
4220 	int i, pipe;
4221 	bool reserve;
4222 	struct amdgpu_ring *iring;
4223 
4224 	mutex_lock(&adev->gfx.pipe_reserve_mutex);
4225 	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
4226 	if (acquire)
4227 		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4228 	else
4229 		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4230 
4231 	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4232 		/* Clear all reservations - everyone reacquires all resources */
4233 		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4234 			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4235 						       true);
4236 
4237 		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4238 			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4239 						       true);
4240 	} else {
4241 		/* Lower all pipes without a current reservation */
4242 		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4243 			iring = &adev->gfx.gfx_ring[i];
4244 			pipe = amdgpu_gfx_queue_to_bit(adev,
4245 						       iring->me,
4246 						       iring->pipe,
4247 						       0);
4248 			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4249 			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4250 		}
4251 
4252 		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4253 			iring = &adev->gfx.compute_ring[i];
4254 			pipe = amdgpu_gfx_queue_to_bit(adev,
4255 						       iring->me,
4256 						       iring->pipe,
4257 						       0);
4258 			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4259 			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4260 		}
4261 	}
4262 
4263 	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4264 }
4265 
4266 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4267 				      struct amdgpu_ring *ring,
4268 				      bool acquire)
4269 {
4270 	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4271 	uint32_t queue_priority = acquire ? 0xf : 0x0;
4272 
4273 	mutex_lock(&adev->srbm_mutex);
4274 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4275 
4276 	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4277 	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4278 
4279 	soc15_grbm_select(adev, 0, 0, 0, 0);
4280 	mutex_unlock(&adev->srbm_mutex);
4281 }
4282 
4283 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4284 					       enum drm_sched_priority priority)
4285 {
4286 	struct amdgpu_device *adev = ring->adev;
4287 	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4288 
4289 	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4290 		return;
4291 
4292 	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4293 	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
4294 }
4295 
4296 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4297 {
4298 	struct amdgpu_device *adev = ring->adev;
4299 
4300 	/* XXX check if swapping is necessary on BE */
4301 	if (ring->use_doorbell) {
4302 		atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4303 		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
4305 		BUG(); /* only DOORBELL method supported on gfx9 now */
4306 	}
4307 }
4308 
4309 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4310 					 u64 seq, unsigned int flags)
4311 {
4312 	struct amdgpu_device *adev = ring->adev;
4313 
4314 	/* we only allocate 32bit for each seq wb address */
4315 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4316 
4317 	/* write fence seq to the "addr" */
4318 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4319 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4320 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4321 	amdgpu_ring_write(ring, lower_32_bits(addr));
4322 	amdgpu_ring_write(ring, upper_32_bits(addr));
4323 	amdgpu_ring_write(ring, lower_32_bits(seq));
4324 
4325 	if (flags & AMDGPU_FENCE_FLAG_INT) {
4326 		/* set register to trigger INT */
4327 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4328 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4329 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4330 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4331 		amdgpu_ring_write(ring, 0);
4332 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4333 	}
4334 }
4335 
4336 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
4337 {
4338 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4339 	amdgpu_ring_write(ring, 0);
4340 }
4341 
4342 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
4343 {
4344 	struct v9_ce_ib_state ce_payload = {0};
4345 	uint64_t csa_addr;
4346 	int cnt;
4347 
4348 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4349 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4350 
4351 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4352 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4353 				 WRITE_DATA_DST_SEL(8) |
4354 				 WR_CONFIRM) |
4355 				 WRITE_DATA_CACHE_POLICY(0));
4356 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4357 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4358 	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
4359 }
4360 
4361 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
4362 {
4363 	struct v9_de_ib_state de_payload = {0};
4364 	uint64_t csa_addr, gds_addr;
4365 	int cnt;
4366 
4367 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4368 	gds_addr = csa_addr + 4096;
4369 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4370 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4371 
4372 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4373 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4374 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4375 				 WRITE_DATA_DST_SEL(8) |
4376 				 WR_CONFIRM) |
4377 				 WRITE_DATA_CACHE_POLICY(0));
4378 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4379 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4380 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
4381 }
4382 
4383 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4384 {
4385 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4386 	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
4387 }
4388 
4389 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4390 {
4391 	uint32_t dw2 = 0;
4392 
4393 	if (amdgpu_sriov_vf(ring->adev))
4394 		gfx_v9_0_ring_emit_ce_meta(ring);
4395 
4396 	gfx_v9_0_ring_emit_tmz(ring, true);
4397 
4398 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
4399 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4400 		/* set load_global_config & load_global_uconfig */
4401 		dw2 |= 0x8001;
4402 		/* set load_cs_sh_regs */
4403 		dw2 |= 0x01000000;
4404 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
4405 		dw2 |= 0x10002;
4406 
4407 		/* set load_ce_ram if preamble presented */
4408 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4409 			dw2 |= 0x10000000;
4410 	} else {
		/* still load_ce_ram if this is the first time the preamble
		 * is presented, even though no context switch happens.
		 */
4414 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4415 			dw2 |= 0x10000000;
4416 	}
4417 
4418 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4419 	amdgpu_ring_write(ring, dw2);
4420 	amdgpu_ring_write(ring, 0);
4421 }
4422 
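/*
 * COND_EXEC patching scheme: emit the packet with a dummy dword count
 * (0x55aa55aa) and return its ring offset; after the conditional span
 * is emitted, gfx_v9_0_ring_emit_patch_cond_exec() overwrites the dummy
 * with the real number of dwords to skip (wrapping over the ring end if
 * needed).
 */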
4423 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4424 {
4425 	unsigned ret;
4426 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4427 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4428 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
4430 	ret = ring->wptr & ring->buf_mask;
4431 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4432 	return ret;
4433 }
4434 
4435 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4436 {
4437 	unsigned cur;
4438 	BUG_ON(offset > ring->buf_mask);
4439 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
4440 
4441 	cur = (ring->wptr & ring->buf_mask) - 1;
4442 	if (likely(cur > offset))
4443 		ring->ring[offset] = cur - offset;
4444 	else
4445 		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
4446 }
4447 
4448 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4449 {
4450 	struct amdgpu_device *adev = ring->adev;
4451 
4452 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4453 	amdgpu_ring_write(ring, 0 |	/* src: register*/
4454 				(5 << 8) |	/* dst: memory */
4455 				(1 << 20));	/* write confirm */
4456 	amdgpu_ring_write(ring, reg);
4457 	amdgpu_ring_write(ring, 0);
4458 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4459 				adev->virt.reg_val_offs * 4));
4460 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4461 				adev->virt.reg_val_offs * 4));
4462 }
4463 
4464 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4465 				    uint32_t val)
4466 {
4467 	uint32_t cmd = 0;
4468 
4469 	switch (ring->funcs->type) {
4470 	case AMDGPU_RING_TYPE_GFX:
4471 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4472 		break;
4473 	case AMDGPU_RING_TYPE_KIQ:
4474 		cmd = (1 << 16); /* no inc addr */
4475 		break;
4476 	default:
4477 		cmd = WR_CONFIRM;
4478 		break;
4479 	}
4480 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4481 	amdgpu_ring_write(ring, cmd);
4482 	amdgpu_ring_write(ring, reg);
4483 	amdgpu_ring_write(ring, 0);
4484 	amdgpu_ring_write(ring, val);
4485 }
4486 
4487 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4488 					uint32_t val, uint32_t mask)
4489 {
4490 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4491 }
4492 
4493 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4494 						  uint32_t reg0, uint32_t reg1,
4495 						  uint32_t ref, uint32_t mask)
4496 {
4497 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4498 	struct amdgpu_device *adev = ring->adev;
4499 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
4500 		adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
4501 
4502 	if (fw_version_ok)
4503 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4504 				      ref, mask, 0x20);
4505 	else
4506 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4507 							   ref, mask);
4508 }
4509 
4510 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4511 {
4512 	struct amdgpu_device *adev = ring->adev;
4513 	uint32_t value = 0;
4514 
4515 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4516 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4517 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4518 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4519 	WREG32(mmSQ_CMD, value);
4520 }
4521 
4522 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4523 						 enum amdgpu_interrupt_state state)
4524 {
4525 	switch (state) {
4526 	case AMDGPU_IRQ_STATE_DISABLE:
4527 	case AMDGPU_IRQ_STATE_ENABLE:
4528 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4529 			       TIME_STAMP_INT_ENABLE,
4530 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4531 		break;
4532 	default:
4533 		break;
4534 	}
4535 }
4536 
4537 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4538 						     int me, int pipe,
4539 						     enum amdgpu_interrupt_state state)
4540 {
4541 	u32 mec_int_cntl, mec_int_cntl_reg;
4542 
4543 	/*
4544 	 * amdgpu controls only the first MEC. That's why this function only
4545 	 * handles the setting of interrupts for this specific MEC. All other
4546 	 * pipes' interrupts are set by amdkfd.
4547 	 */
4548 
4549 	if (me == 1) {
4550 		switch (pipe) {
4551 		case 0:
4552 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4553 			break;
4554 		case 1:
4555 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4556 			break;
4557 		case 2:
4558 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4559 			break;
4560 		case 3:
4561 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4562 			break;
4563 		default:
4564 			DRM_DEBUG("invalid pipe %d\n", pipe);
4565 			return;
4566 		}
4567 	} else {
4568 		DRM_DEBUG("invalid me %d\n", me);
4569 		return;
4570 	}
4571 
4572 	switch (state) {
4573 	case AMDGPU_IRQ_STATE_DISABLE:
4574 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4575 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4576 					     TIME_STAMP_INT_ENABLE, 0);
4577 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4578 		break;
4579 	case AMDGPU_IRQ_STATE_ENABLE:
4580 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4581 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4582 					     TIME_STAMP_INT_ENABLE, 1);
4583 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4584 		break;
4585 	default:
4586 		break;
4587 	}
4588 }
4589 
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

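/*
 * Enable/disable the interrupt raised on an illegal instruction in a
 * command stream.
 */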
static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

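/* Fan the per-type EOP interrupt controls out to the right ME/pipe. */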
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

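/*
 * EOP interrupt handler: decode which ring raised the interrupt from
 * the IV ring_id (e.g. ring_id 0x25 -> me 1, pipe 1, queue 2) and let
 * the fence code process its completed fences.
 */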
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	/* ring_id layout: bits [1:0] pipe, [3:2] me, [6:4] queue */
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting
			 * from VI.  The interrupt can only be enabled/disabled
			 * per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

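/*
 * Privilege faults are fatal for the offending command stream: all the
 * driver can do here is log the event and schedule a GPU reset.
 */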
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

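/*
 * The .emit_frame_size values in the tables below are worst-case dword
 * counts reserved in the ring for a single submission; the per-packet
 * breakdown is annotated inline.
 */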
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* at most 242 dwords if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jumps to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v9_0_ring_soft_recovery,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v9_0_ring_set_priority_compute,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

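/* Hook up the ring function tables for the KIQ, gfx and compute rings. */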
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->gds.mem.total_size = 0x10000;	/* 64 KiB */
		break;
	case CHIP_RAVEN:
		adev->gds.mem.total_size = 0x1000;	/* 4 KiB */
		break;
	default:
		adev->gds.mem.total_size = 0x10000;
		break;
	}

	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

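/*
 * Apply a user-requested inactive-CU mask (parsed from the amdgpu
 * "disable_cu" module option) to the currently selected SE/SH.
 */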
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

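/*
 * Return the active CU bitmap of the currently selected SE/SH: the
 * union of the fused-off and user-disabled masks, inverted and limited
 * to max_cu_per_sh bits.
 */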
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

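/*
 * Build the CU info reported to the rest of the driver: per-SE/SH
 * active CU bitmaps, the total active CU count and the always-on (AO)
 * CU mask.
 */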
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};
