1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/firmware.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include "amdgpu.h"
30 #include "amdgpu_gfx.h"
31 #include "amdgpu_psp.h"
32 #include "amdgpu_smu.h"
33 #include "nv.h"
34 #include "nvd.h"
35 
36 #include "gc/gc_10_1_0_offset.h"
37 #include "gc/gc_10_1_0_sh_mask.h"
38 #include "navi10_enum.h"
39 #include "hdp/hdp_5_0_0_offset.h"
40 #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
41 
42 #include "soc15.h"
43 #include "soc15_common.h"
44 #include "clearstate_gfx10.h"
45 #include "v10_structs.h"
46 #include "gfx_v10_0.h"
47 #include "nbio_v2_3.h"
48 
49 /**
50  * Navi10 has two graphic rings to share each graphic pipe.
51  * 1. Primary ring
52  * 2. Async ring
53  *
54  * In bring-up phase, it just used primary ring so set gfx ring number as 1 at
55  * first.
56  */
57 #define GFX10_NUM_GFX_RINGS	2
58 #define GFX10_MEC_HPD_SIZE	2048
59 
60 #define F32_CE_PROGRAM_RAM_SIZE		65536
61 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
62 
63 #define mmCGTT_GS_NGG_CLK_CTRL	0x5087
64 #define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX	1
65 
66 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
67 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
68 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
69 MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
70 MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
71 MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");
72 
73 MODULE_FIRMWARE("amdgpu/navi14_ce_wks.bin");
74 MODULE_FIRMWARE("amdgpu/navi14_pfp_wks.bin");
75 MODULE_FIRMWARE("amdgpu/navi14_me_wks.bin");
76 MODULE_FIRMWARE("amdgpu/navi14_mec_wks.bin");
77 MODULE_FIRMWARE("amdgpu/navi14_mec2_wks.bin");
78 MODULE_FIRMWARE("amdgpu/navi14_ce.bin");
79 MODULE_FIRMWARE("amdgpu/navi14_pfp.bin");
80 MODULE_FIRMWARE("amdgpu/navi14_me.bin");
81 MODULE_FIRMWARE("amdgpu/navi14_mec.bin");
82 MODULE_FIRMWARE("amdgpu/navi14_mec2.bin");
83 MODULE_FIRMWARE("amdgpu/navi14_rlc.bin");
84 
85 MODULE_FIRMWARE("amdgpu/navi12_ce.bin");
86 MODULE_FIRMWARE("amdgpu/navi12_pfp.bin");
87 MODULE_FIRMWARE("amdgpu/navi12_me.bin");
88 MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
89 MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
90 MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");
91 
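/*
 * Golden register settings: per-register mask/value overrides applied once
 * at init time to replace the hardware's power-on defaults for each ASIC.
 */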
92 static const struct soc15_reg_golden golden_settings_gc_10_1[] =
93 {
94 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
95 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
96 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
97 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
98 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
99 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
100 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
101 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
102 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
103 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
104 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
105 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
106 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
107 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07900000, 0x04900000),
108 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
109 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
110 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
111 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
112 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
113 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
114 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
115 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
116 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
117 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
118 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
119 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
120 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
121 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
122 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
123 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
124 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
125 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
126 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
127 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
128 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
129 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
130 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
131 };
132 
133 static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
134 {
135 	/* Pending on emulation bring up */
136 };
137 
138 static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
139 {
140 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
141 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
142 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
143 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
144 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
145 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
146 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
147 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
148 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
149 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
150 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
151 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
152 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
153 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200),
154 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
155 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
156 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
157 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
158 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
159 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
160 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
161 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7),
162 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
163 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
164 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
165 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
166 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
167 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
168 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
169 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
170 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
171 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
172 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
173 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
174 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
175 };
176 
177 static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
178 {
179 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
180 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
181 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
182 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
183 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
184 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
185 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
186 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100),
187 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
188 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000),
189 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff),
190 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
191 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
192 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
193 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
194 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
195 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
196 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
197 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
198 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
199 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
200 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
201 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
202 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
203 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
204 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
205 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
206 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02),
207 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
208 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
209 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000),
210 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
211 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
212 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
213 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
214 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
215 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
216 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
217 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
218 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
219 };
220 
221 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
222 {
223 	/* Pending on emulation bring up */
224 };
225 
226 static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
227 {
228 	/* Pending on emulation bring up */
229 };
230 
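/*
 * Default SH_MEM_CONFIG for this family: 64-bit address mode, unaligned
 * accesses allowed, retries enabled for all access types
 * (SH_MEM_RETRY_MODE_ALL) and an initial instruction prefetch level of 3.
 */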
231 #define DEFAULT_SH_MEM_CONFIG \
232 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
233 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
234 	 (SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
235 	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
236 
237 
238 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
239 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
240 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
241 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
242 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
243                                  struct amdgpu_cu_info *cu_info);
244 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
245 static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
246 				   u32 sh_num, u32 instance);
247 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
248 
249 static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
250 static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
251 static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
252 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
253 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
254 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
255 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
256 
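/*
 * KIQ (Kernel Interface Queue) helpers. The KIQ is a driver-owned compute
 * queue used to manage the other queues: SET_RESOURCES, MAP_QUEUES,
 * UNMAP_QUEUES and QUERY_STATUS requests are submitted to the CP as PM4
 * packets rather than programmed through MMIO.
 */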
257 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
258 {
259 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
260 	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
261 			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
262 	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
263 	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
264 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
265 	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
266 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
267 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
268 }
269 
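/*
 * Activate a queue through the KIQ: the MQD (memory queue descriptor) at
 * mqd_addr carries the full queue state and wptr_addr is the location the
 * CP polls for write-pointer updates; eng_sel 4 selects the gfx engine,
 * 0 the compute engine.
 */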
270 static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
271 				 struct amdgpu_ring *ring)
272 {
273 	struct amdgpu_device *adev = kiq_ring->adev;
274 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
275 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
276 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
277 
278 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
281 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
282 			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
283 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
284 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
285 			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
286 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
287 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
288 			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
289 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
290 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
291 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
292 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
293 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
294 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
295 }
296 
297 static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
298 				   struct amdgpu_ring *ring,
299 				   enum amdgpu_unmap_queues_action action,
300 				   u64 gpu_addr, u64 seq)
301 {
302 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
303 
304 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* action, Q_sel: 0, eng_sel, num_Q: 1 */
306 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
307 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
308 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
309 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
310 	amdgpu_ring_write(kiq_ring,
311 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
312 
313 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
314 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
315 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
316 		amdgpu_ring_write(kiq_ring, seq);
317 	} else {
318 		amdgpu_ring_write(kiq_ring, 0);
319 		amdgpu_ring_write(kiq_ring, 0);
320 		amdgpu_ring_write(kiq_ring, 0);
321 	}
322 }
323 
324 static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
325 				   struct amdgpu_ring *ring,
326 				   u64 addr,
327 				   u64 seq)
328 {
329 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
330 
331 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
332 	amdgpu_ring_write(kiq_ring,
333 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
334 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
335 			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* doorbell offset, eng_sel */
337 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
338 			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
339 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
340 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
341 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
342 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
343 }
344 
345 static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
346 	.kiq_set_resources = gfx10_kiq_set_resources,
347 	.kiq_map_queues = gfx10_kiq_map_queues,
348 	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
349 	.kiq_query_status = gfx10_kiq_query_status,
350 	.set_resources_size = 8,
351 	.map_queues_size = 7,
352 	.unmap_queues_size = 6,
353 	.query_status_size = 7,
354 };
355 
356 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
357 {
358 	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
359 }
360 
361 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
362 {
363 	switch (adev->asic_type) {
364 	case CHIP_NAVI10:
365 		soc15_program_register_sequence(adev,
366 						golden_settings_gc_10_1,
367 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
368 		soc15_program_register_sequence(adev,
369 						golden_settings_gc_10_0_nv10,
370 						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
371 		break;
372 	case CHIP_NAVI14:
373 		soc15_program_register_sequence(adev,
374 						golden_settings_gc_10_1_1,
375 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
376 		soc15_program_register_sequence(adev,
377 						golden_settings_gc_10_1_nv14,
378 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
379 		break;
380 	case CHIP_NAVI12:
381 		soc15_program_register_sequence(adev,
382 						golden_settings_gc_10_1_2,
383 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
384 		soc15_program_register_sequence(adev,
385 						golden_settings_gc_10_1_2_nv12,
386 						(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
387 		break;
388 	default:
389 		break;
390 	}
391 }
392 
393 static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
394 {
395 	adev->gfx.scratch.num_reg = 8;
396 	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
397 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
398 }
399 
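/*
 * Emit a WRITE_DATA packet that writes @val to register @reg from engine
 * @eng_sel; @wc requests write confirmation so subsequent packets are
 * ordered after the write.
 */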
400 static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
401 				       bool wc, uint32_t reg, uint32_t val)
402 {
403 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
404 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
405 			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
406 	amdgpu_ring_write(ring, reg);
407 	amdgpu_ring_write(ring, 0);
408 	amdgpu_ring_write(ring, val);
409 }
410 
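/*
 * Emit a WAIT_REG_MEM packet: stall until the dword at addr0/addr1 (when
 * mem_space is set) or the register at addr0 (when clear), masked with
 * @mask, equals @ref; @inv is the poll interval. A typical use is waiting
 * for a fence value in memory, with the dword-aligned address split across
 * addr0 (low) and addr1 (high).
 */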
411 static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
412 				  int mem_space, int opt, uint32_t addr0,
413 				  uint32_t addr1, uint32_t ref, uint32_t mask,
414 				  uint32_t inv)
415 {
416 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
417 	amdgpu_ring_write(ring,
418 			  /* memory (1) or register (0) */
419 			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
420 			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
421 			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
422 			   WAIT_REG_MEM_ENGINE(eng_sel)));
423 
424 	if (mem_space)
425 		BUG_ON(addr0 & 0x3); /* Dword align */
426 	amdgpu_ring_write(ring, addr0);
427 	amdgpu_ring_write(ring, addr1);
428 	amdgpu_ring_write(ring, ref);
429 	amdgpu_ring_write(ring, mask);
430 	amdgpu_ring_write(ring, inv); /* poll interval */
431 }
432 
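/*
 * Basic ring test: write a magic value to a scratch register through a
 * SET_UCONFIG_REG packet and poll until it appears, proving the CP is
 * fetching and executing packets from the ring.
 */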
433 static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
434 {
435 	struct amdgpu_device *adev = ring->adev;
436 	uint32_t scratch;
437 	uint32_t tmp = 0;
438 	unsigned i;
439 	int r;
440 
441 	r = amdgpu_gfx_scratch_get(adev, &scratch);
442 	if (r) {
443 		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
444 		return r;
445 	}
446 
447 	WREG32(scratch, 0xCAFEDEAD);
448 
449 	r = amdgpu_ring_alloc(ring, 3);
450 	if (r) {
451 		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
452 			  ring->idx, r);
453 		amdgpu_gfx_scratch_free(adev, scratch);
454 		return r;
455 	}
456 
457 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
458 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
459 	amdgpu_ring_write(ring, 0xDEADBEEF);
460 	amdgpu_ring_commit(ring);
461 
462 	for (i = 0; i < adev->usec_timeout; i++) {
463 		tmp = RREG32(scratch);
464 		if (tmp == 0xDEADBEEF)
465 			break;
466 		if (amdgpu_emu_mode == 1)
467 			msleep(1);
468 		else
469 			udelay(1);
470 	}
471 	if (i < adev->usec_timeout) {
472 		if (amdgpu_emu_mode == 1)
473 			DRM_INFO("ring test on %d succeeded in %d msecs\n",
474 				 ring->idx, i);
475 		else
476 			DRM_INFO("ring test on %d succeeded in %d usecs\n",
477 				 ring->idx, i);
478 	} else {
479 		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
480 			  ring->idx, scratch, tmp);
481 		r = -EINVAL;
482 	}
483 	amdgpu_gfx_scratch_free(adev, scratch);
484 
485 	return r;
486 }
487 
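/*
 * IB test: the same scratch-register write as the ring test, but issued
 * from an indirect buffer and synchronized with a fence, exercising the
 * full IB submission path.
 */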
488 static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
489 {
490 	struct amdgpu_device *adev = ring->adev;
491 	struct amdgpu_ib ib;
492 	struct dma_fence *f = NULL;
493 	uint32_t scratch;
494 	uint32_t tmp = 0;
495 	long r;
496 
497 	r = amdgpu_gfx_scratch_get(adev, &scratch);
498 	if (r) {
499 		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
500 		return r;
501 	}
502 
503 	WREG32(scratch, 0xCAFEDEAD);
504 
505 	memset(&ib, 0, sizeof(ib));
506 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
507 	if (r) {
508 		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
509 		goto err1;
510 	}
511 
512 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
513 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
514 	ib.ptr[2] = 0xDEADBEEF;
515 	ib.length_dw = 3;
516 
517 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
518 	if (r)
519 		goto err2;
520 
521 	r = dma_fence_wait_timeout(f, false, timeout);
522 	if (r == 0) {
523 		DRM_ERROR("amdgpu: IB test timed out.\n");
524 		r = -ETIMEDOUT;
525 		goto err2;
526 	} else if (r < 0) {
527 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
528 		goto err2;
529 	}
530 
531 	tmp = RREG32(scratch);
532 	if (tmp == 0xDEADBEEF) {
533 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
534 		r = 0;
535 	} else {
536 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
537 			  scratch, tmp);
538 		r = -EINVAL;
539 	}
540 err2:
541 	amdgpu_ib_free(adev, &ib, NULL);
542 	dma_fence_put(f);
543 err1:
544 	amdgpu_gfx_scratch_free(adev, scratch);
545 
546 	return r;
547 }
548 
549 static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
550 {
551 	release_firmware(adev->gfx.pfp_fw);
552 	adev->gfx.pfp_fw = NULL;
553 	release_firmware(adev->gfx.me_fw);
554 	adev->gfx.me_fw = NULL;
555 	release_firmware(adev->gfx.ce_fw);
556 	adev->gfx.ce_fw = NULL;
557 	release_firmware(adev->gfx.rlc_fw);
558 	adev->gfx.rlc_fw = NULL;
559 	release_firmware(adev->gfx.mec_fw);
560 	adev->gfx.mec_fw = NULL;
561 	release_firmware(adev->gfx.mec2_fw);
562 	adev->gfx.mec2_fw = NULL;
563 
564 	kfree(adev->gfx.rlc.register_list_format);
565 }
566 
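/*
 * cp_fw_write_wait is set when the ME/PFP/MEC firmware is new enough to
 * implement the 1-cycle GRBM delay, which lets the driver use the
 * combined register write-then-wait packet path.
 */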
567 static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
568 {
569 	adev->gfx.cp_fw_write_wait = false;
570 
571 	switch (adev->asic_type) {
572 	case CHIP_NAVI10:
573 	case CHIP_NAVI12:
574 	case CHIP_NAVI14:
575 		if ((adev->gfx.me_fw_version >= 0x00000046) &&
576 		    (adev->gfx.me_feature_version >= 27) &&
577 		    (adev->gfx.pfp_fw_version >= 0x00000068) &&
578 		    (adev->gfx.pfp_feature_version >= 27) &&
579 		    (adev->gfx.mec_fw_version >= 0x0000005b) &&
580 		    (adev->gfx.mec_feature_version >= 27))
581 			adev->gfx.cp_fw_write_wait = true;
582 		break;
583 	default:
584 		break;
585 	}
586 
	if (!adev->gfx.cp_fw_write_wait)
		DRM_WARN_ONCE("CP firmware too old to honor the GRBM 1-cycle delay; check cp_fw_version and update the firmware\n");
590 }
591 
592 
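/*
 * RLC v2.1 firmware images carry save/restore lists (CNTL, GPM and SRM)
 * appended to the main ucode; record their versions, sizes and offsets
 * for later upload.
 */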
593 static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
594 {
595 	const struct rlc_firmware_header_v2_1 *rlc_hdr;
596 
597 	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
598 	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
599 	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
600 	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
601 	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
602 	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
603 	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
604 	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
605 	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
606 	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
607 	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
608 	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
609 	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
610 	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
611 			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
612 }
613 
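/* GFXOFF is not enabled on Navi10 at this point, so mask the feature flag off. */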
614 static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
615 {
616 	switch (adev->asic_type) {
617 	case CHIP_NAVI10:
618 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
619 		break;
620 	default:
621 		break;
622 	}
623 }
624 
625 static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
626 {
627 	const char *chip_name;
628 	char fw_name[40];
629 	char wks[10];
630 	int err;
631 	struct amdgpu_firmware_info *info = NULL;
632 	const struct common_firmware_header *header = NULL;
633 	const struct gfx_firmware_header_v1_0 *cp_hdr;
634 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
635 	unsigned int *tmp = NULL;
636 	unsigned int i = 0;
637 	uint16_t version_major;
638 	uint16_t version_minor;
639 
640 	DRM_DEBUG("\n");
641 
642 	memset(wks, 0, sizeof(wks));
643 	switch (adev->asic_type) {
644 	case CHIP_NAVI10:
645 		chip_name = "navi10";
646 		break;
647 	case CHIP_NAVI14:
648 		chip_name = "navi14";
649 		if (!(adev->pdev->device == 0x7340 &&
650 		      adev->pdev->revision != 0x00))
651 			snprintf(wks, sizeof(wks), "_wks");
652 		break;
653 	case CHIP_NAVI12:
654 		chip_name = "navi12";
655 		break;
656 	default:
657 		BUG();
658 	}
659 
660 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
661 	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
662 	if (err)
663 		goto out;
664 	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
665 	if (err)
666 		goto out;
667 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
668 	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
669 	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
670 
671 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
672 	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
673 	if (err)
674 		goto out;
675 	err = amdgpu_ucode_validate(adev->gfx.me_fw);
676 	if (err)
677 		goto out;
678 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
679 	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
680 	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
681 
682 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
683 	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
684 	if (err)
685 		goto out;
686 	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
687 	if (err)
688 		goto out;
689 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
690 	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
691 	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
692 
693 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
694 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
695 	if (err)
696 		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
698 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
699 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
700 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
701 	if (version_major == 2 && version_minor == 1)
702 		adev->gfx.rlc.is_rlc_v2_1 = true;
703 
704 	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
705 	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
706 	adev->gfx.rlc.save_and_restore_offset =
707 			le32_to_cpu(rlc_hdr->save_and_restore_offset);
708 	adev->gfx.rlc.clear_state_descriptor_offset =
709 			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
710 	adev->gfx.rlc.avail_scratch_ram_locations =
711 			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
712 	adev->gfx.rlc.reg_restore_list_size =
713 			le32_to_cpu(rlc_hdr->reg_restore_list_size);
714 	adev->gfx.rlc.reg_list_format_start =
715 			le32_to_cpu(rlc_hdr->reg_list_format_start);
716 	adev->gfx.rlc.reg_list_format_separate_start =
717 			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
718 	adev->gfx.rlc.starting_offsets_start =
719 			le32_to_cpu(rlc_hdr->starting_offsets_start);
720 	adev->gfx.rlc.reg_list_format_size_bytes =
721 			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
722 	adev->gfx.rlc.reg_list_size_bytes =
723 			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
724 	adev->gfx.rlc.register_list_format =
725 			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
726 				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
727 	if (!adev->gfx.rlc.register_list_format) {
728 		err = -ENOMEM;
729 		goto out;
730 	}
731 
732 	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
733 			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
734 	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
735 		adev->gfx.rlc.register_list_format[i] =	le32_to_cpu(tmp[i]);
736 
737 	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
738 
739 	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
740 			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
741 	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
742 		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
743 
744 	if (adev->gfx.rlc.is_rlc_v2_1)
745 		gfx_v10_0_init_rlc_ext_microcode(adev);
746 
747 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
748 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
749 	if (err)
750 		goto out;
751 	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
752 	if (err)
753 		goto out;
754 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
755 	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
756 	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
757 
758 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
759 	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
760 	if (!err) {
761 		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
762 		if (err)
763 			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			 adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
770 	} else {
771 		err = 0;
772 		adev->gfx.mec2_fw = NULL;
773 	}
774 
775 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
776 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
777 		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
778 		info->fw = adev->gfx.pfp_fw;
779 		header = (const struct common_firmware_header *)info->fw->data;
780 		adev->firmware.fw_size +=
781 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
782 
783 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
784 		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
785 		info->fw = adev->gfx.me_fw;
786 		header = (const struct common_firmware_header *)info->fw->data;
787 		adev->firmware.fw_size +=
788 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
789 
790 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
791 		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
792 		info->fw = adev->gfx.ce_fw;
793 		header = (const struct common_firmware_header *)info->fw->data;
794 		adev->firmware.fw_size +=
795 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
796 
797 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
798 		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
799 		info->fw = adev->gfx.rlc_fw;
800 		header = (const struct common_firmware_header *)info->fw->data;
801 		adev->firmware.fw_size +=
802 			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
803 
804 		if (adev->gfx.rlc.is_rlc_v2_1 &&
805 		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
806 		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
807 		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
808 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
809 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
810 			info->fw = adev->gfx.rlc_fw;
811 			adev->firmware.fw_size +=
812 				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
813 
814 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
815 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
816 			info->fw = adev->gfx.rlc_fw;
817 			adev->firmware.fw_size +=
818 				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
819 
820 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
821 			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
822 			info->fw = adev->gfx.rlc_fw;
823 			adev->firmware.fw_size +=
824 				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
825 		}
826 
827 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
828 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
829 		info->fw = adev->gfx.mec_fw;
830 		header = (const struct common_firmware_header *)info->fw->data;
831 		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
832 		adev->firmware.fw_size +=
833 			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
834 			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
835 
836 		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
837 		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
838 		info->fw = adev->gfx.mec_fw;
839 		adev->firmware.fw_size +=
840 			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
841 
842 		if (adev->gfx.mec2_fw) {
843 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
844 			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
845 			info->fw = adev->gfx.mec2_fw;
846 			header = (const struct common_firmware_header *)info->fw->data;
847 			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
848 			adev->firmware.fw_size +=
849 				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
850 				      le32_to_cpu(cp_hdr->jt_size) * 4,
851 				      PAGE_SIZE);
852 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
853 			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
854 			info->fw = adev->gfx.mec2_fw;
855 			adev->firmware.fw_size +=
856 				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
857 				      PAGE_SIZE);
858 		}
859 	}
860 
861 	gfx_v10_0_check_fw_write_wait(adev);
862 out:
863 	if (err) {
864 		dev_err(adev->dev,
865 			"gfx10: Failed to load firmware \"%s\"\n",
866 			fw_name);
867 		release_firmware(adev->gfx.pfp_fw);
868 		adev->gfx.pfp_fw = NULL;
869 		release_firmware(adev->gfx.me_fw);
870 		adev->gfx.me_fw = NULL;
871 		release_firmware(adev->gfx.ce_fw);
872 		adev->gfx.ce_fw = NULL;
873 		release_firmware(adev->gfx.rlc_fw);
874 		adev->gfx.rlc_fw = NULL;
875 		release_firmware(adev->gfx.mec_fw);
876 		adev->gfx.mec_fw = NULL;
877 		release_firmware(adev->gfx.mec2_fw);
878 		adev->gfx.mec2_fw = NULL;
879 	}
880 
881 	gfx_v10_0_check_gfxoff_flag(adev);
882 
883 	return err;
884 }
885 
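/*
 * The clear-state buffer (CSB) is a PM4 stream the CP replays to bring
 * the context registers to a known state; its size is derived from the
 * gfx10_cs_data section table plus the fixed preamble/epilogue packets
 * counted below.
 */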
886 static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
887 {
888 	u32 count = 0;
889 	const struct cs_section_def *sect = NULL;
890 	const struct cs_extent_def *ext = NULL;
891 
892 	/* begin clear state */
893 	count += 2;
894 	/* context control state */
895 	count += 3;
896 
897 	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
898 		for (ext = sect->section; ext->extent != NULL; ++ext) {
899 			if (sect->id == SECT_CONTEXT)
900 				count += 2 + ext->reg_count;
901 			else
902 				return 0;
903 		}
904 	}
905 
906 	/* set PA_SC_TILE_STEERING_OVERRIDE */
907 	count += 3;
908 	/* end clear state */
909 	count += 2;
910 	/* clear state */
911 	count += 2;
912 
913 	return count;
914 }
915 
916 static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
917 				    volatile u32 *buffer)
918 {
919 	u32 count = 0, i;
920 	const struct cs_section_def *sect = NULL;
921 	const struct cs_extent_def *ext = NULL;
922 	int ctx_reg_offset;
923 
924 	if (adev->gfx.rlc.cs_data == NULL)
925 		return;
926 	if (buffer == NULL)
927 		return;
928 
929 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
930 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
931 
932 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
933 	buffer[count++] = cpu_to_le32(0x80000000);
934 	buffer[count++] = cpu_to_le32(0x80000000);
935 
936 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
937 		for (ext = sect->section; ext->extent != NULL; ++ext) {
938 			if (sect->id == SECT_CONTEXT) {
939 				buffer[count++] =
940 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
941 				buffer[count++] = cpu_to_le32(ext->reg_index -
942 						PACKET3_SET_CONTEXT_REG_START);
943 				for (i = 0; i < ext->reg_count; i++)
944 					buffer[count++] = cpu_to_le32(ext->extent[i]);
945 			} else {
946 				return;
947 			}
948 		}
949 	}
950 
951 	ctx_reg_offset =
952 		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
953 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
954 	buffer[count++] = cpu_to_le32(ctx_reg_offset);
955 	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);
956 
957 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
958 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
959 
960 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
961 	buffer[count++] = cpu_to_le32(0);
962 }
963 
964 static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
965 {
966 	/* clear state block */
967 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
968 			&adev->gfx.rlc.clear_state_gpu_addr,
969 			(void **)&adev->gfx.rlc.cs_ptr);
970 
971 	/* jump table block */
972 	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
973 			&adev->gfx.rlc.cp_table_gpu_addr,
974 			(void **)&adev->gfx.rlc.cp_table_ptr);
975 }
976 
977 static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
978 {
979 	const struct cs_section_def *cs_data;
980 	int r;
981 
982 	adev->gfx.rlc.cs_data = gfx10_cs_data;
983 
984 	cs_data = adev->gfx.rlc.cs_data;
985 
986 	if (cs_data) {
987 		/* init clear state block */
988 		r = amdgpu_gfx_rlc_init_csb(adev);
989 		if (r)
990 			return r;
991 	}
992 
993 	return 0;
994 }
995 
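/* Pin the clear-state BO in VRAM so it keeps a stable GPU address while in use. */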
996 static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev)
997 {
998 	int r;
999 
1000 	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
1001 	if (unlikely(r != 0))
1002 		return r;
1003 
1004 	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1005 			AMDGPU_GEM_DOMAIN_VRAM);
1006 	if (!r)
1007 		adev->gfx.rlc.clear_state_gpu_addr =
1008 			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1009 
1010 	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1011 
1012 	return r;
1013 }
1014 
1015 static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev)
1016 {
1017 	int r;
1018 
1019 	if (!adev->gfx.rlc.clear_state_obj)
1020 		return;
1021 
1022 	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1023 	if (likely(r == 0)) {
1024 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1025 		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1026 	}
1027 }
1028 
1029 static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
1030 {
1031 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1032 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1033 }
1034 
1035 static int gfx_v10_0_me_init(struct amdgpu_device *adev)
1036 {
1037 	int r;
1038 
1039 	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
1040 
1041 	amdgpu_gfx_graphics_queue_acquire(adev);
1042 
1043 	r = gfx_v10_0_init_microcode(adev);
1044 	if (r)
1045 		DRM_ERROR("Failed to load gfx firmware!\n");
1046 
1047 	return r;
1048 }
1049 
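/*
 * Allocate the MEC's per-queue EOP buffers (one GFX10_MEC_HPD_SIZE slot
 * per enabled compute ring) and, for direct firmware loading, a GTT BO
 * holding the MEC ucode image.
 */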
1050 static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
1051 {
1052 	int r;
1053 	u32 *hpd;
1054 	const __le32 *fw_data = NULL;
1055 	unsigned fw_size;
1056 	u32 *fw = NULL;
1057 	size_t mec_hpd_size;
1058 
1059 	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;
1060 
1061 	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1062 
1063 	/* take ownership of the relevant compute queues */
1064 	amdgpu_gfx_compute_queue_acquire(adev);
1065 	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
1066 
1067 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1068 				      AMDGPU_GEM_DOMAIN_GTT,
1069 				      &adev->gfx.mec.hpd_eop_obj,
1070 				      &adev->gfx.mec.hpd_eop_gpu_addr,
1071 				      (void **)&hpd);
1072 	if (r) {
1073 		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1074 		gfx_v10_0_mec_fini(adev);
1075 		return r;
1076 	}
1077 
1078 	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1079 
1080 	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1081 	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1082 
1083 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1084 		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1085 
1086 		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
1087 			 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1088 		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1089 
1090 		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
1091 					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1092 					      &adev->gfx.mec.mec_fw_obj,
1093 					      &adev->gfx.mec.mec_fw_gpu_addr,
1094 					      (void **)&fw);
1095 		if (r) {
1096 			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
1097 			gfx_v10_0_mec_fini(adev);
1098 			return r;
1099 		}
1100 
1101 		memcpy(fw, fw_data, fw_size);
1102 
1103 		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1104 		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1105 	}
1106 
1107 	return 0;
1108 }
1109 
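/*
 * Wave debug helpers: per-wave state is reached indirectly through the
 * SQ_IND_INDEX/SQ_IND_DATA register pair, which selects a wave and a
 * register index (with optional auto-increment for bulk GPR reads).
 */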
1110 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
1111 {
1112 	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1113 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1114 		(address << SQ_IND_INDEX__INDEX__SHIFT));
1115 	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1116 }
1117 
1118 static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
1119 			   uint32_t thread, uint32_t regno,
1120 			   uint32_t num, uint32_t *out)
1121 {
1122 	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1123 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1124 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
1125 		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
1126 		(SQ_IND_INDEX__AUTO_INCR_MASK));
1127 	while (num--)
1128 		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1129 }
1130 
1131 static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1132 {
	/*
	 * In gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh, so it should be
	 * zero here.
	 */
1136 	WARN_ON(simd != 0);
1137 
1138 	/* type 2 wave data */
1139 	dst[(*no_fields)++] = 2;
1140 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
1141 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
1142 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
1143 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
1144 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
1145 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
1146 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
1147 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
1148 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
1149 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
1150 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
1151 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
1152 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
1153 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
1154 	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
1155 }
1156 
1157 static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1158 				     uint32_t wave, uint32_t start,
1159 				     uint32_t size, uint32_t *dst)
1160 {
1161 	WARN_ON(simd != 0);
1162 
1163 	wave_read_regs(
1164 		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
1165 		dst);
1166 }
1167 
1168 static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1169 				      uint32_t wave, uint32_t thread,
1170 				      uint32_t start, uint32_t size,
1171 				      uint32_t *dst)
1172 {
1173 	wave_read_regs(
1174 		adev, wave, thread,
1175 		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1176 }
1177 
static void gfx_v10_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	nv_grbm_select(adev, me, pipe, q, vm);
}
1183 
1184 
1185 static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
1186 	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
1187 	.select_se_sh = &gfx_v10_0_select_se_sh,
1188 	.read_wave_data = &gfx_v10_0_read_wave_data,
1189 	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
1190 	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
1191 	.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
1192 };
1193 
1194 static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
1195 {
1196 	u32 gb_addr_config;
1197 
1198 	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;
1199 
1200 	switch (adev->asic_type) {
1201 	case CHIP_NAVI10:
1202 	case CHIP_NAVI14:
1203 	case CHIP_NAVI12:
1204 		adev->gfx.config.max_hw_contexts = 8;
1205 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1206 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1207 		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
1208 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1209 		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1210 		break;
1211 	default:
1212 		BUG();
1213 		break;
1214 	}
1215 
1216 	adev->gfx.config.gb_addr_config = gb_addr_config;
1217 
1218 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1219 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
1220 				      GB_ADDR_CONFIG, NUM_PIPES);
1221 
1222 	adev->gfx.config.max_tile_pipes =
1223 		adev->gfx.config.gb_addr_config_fields.num_pipes;
1224 
1225 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1226 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
1227 				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
1228 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1229 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
1230 				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
1231 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1232 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
1233 				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
1234 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1235 			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
1236 				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
1237 }
1238 
1239 static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
1240 				   int me, int pipe, int queue)
1241 {
1242 	int r;
1243 	struct amdgpu_ring *ring;
1244 	unsigned int irq_type;
1245 
1246 	ring = &adev->gfx.gfx_ring[ring_id];
1247 
1248 	ring->me = me;
1249 	ring->pipe = pipe;
1250 	ring->queue = queue;
1251 
1252 	ring->ring_obj = NULL;
1253 	ring->use_doorbell = true;
1254 
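	/*
	 * Doorbell indices are assigned in 64-bit slots while the CP
	 * expects a 32-bit dword offset, hence the shift by one below.
	 */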
1255 	if (!ring_id)
1256 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
1257 	else
1258 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
1259 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1260 
1261 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
1262 	r = amdgpu_ring_init(adev, ring, 1024,
1263 			     &adev->gfx.eop_irq, irq_type);
1264 	if (r)
1265 		return r;
1266 	return 0;
1267 }
1268 
1269 static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1270 				       int mec, int pipe, int queue)
1271 {
1272 	int r;
1273 	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1277 
1278 	/* mec0 is me1 */
1279 	ring->me = mec + 1;
1280 	ring->pipe = pipe;
1281 	ring->queue = queue;
1282 
1283 	ring->ring_obj = NULL;
1284 	ring->use_doorbell = true;
1285 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
1286 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1287 				+ (ring_id * GFX10_MEC_HPD_SIZE);
1288 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1289 
1290 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1291 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1292 		+ ring->pipe;
1293 
1294 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1295 	r = amdgpu_ring_init(adev, ring, 1024,
1296 			     &adev->gfx.eop_irq, irq_type);
1297 	if (r)
1298 		return r;
1299 
1300 	return 0;
1301 }
1302 
1303 static int gfx_v10_0_sw_init(void *handle)
1304 {
1305 	int i, j, k, r, ring_id = 0;
1306 	struct amdgpu_kiq *kiq;
1307 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1308 
1309 	switch (adev->asic_type) {
1310 	case CHIP_NAVI10:
1311 	case CHIP_NAVI14:
1312 	case CHIP_NAVI12:
1313 		adev->gfx.me.num_me = 1;
1314 		adev->gfx.me.num_pipe_per_me = 2;
1315 		adev->gfx.me.num_queue_per_pipe = 1;
1316 		adev->gfx.mec.num_mec = 2;
1317 		adev->gfx.mec.num_pipe_per_mec = 4;
1318 		adev->gfx.mec.num_queue_per_pipe = 8;
1319 		break;
1320 	default:
1321 		adev->gfx.me.num_me = 1;
1322 		adev->gfx.me.num_pipe_per_me = 1;
1323 		adev->gfx.me.num_queue_per_pipe = 1;
1324 		adev->gfx.mec.num_mec = 1;
1325 		adev->gfx.mec.num_pipe_per_mec = 4;
1326 		adev->gfx.mec.num_queue_per_pipe = 8;
1327 		break;
1328 	}
1329 
1330 	/* KIQ event */
1331 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
1332 			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
1333 			      &adev->gfx.kiq.irq);
1334 	if (r)
1335 		return r;
1336 
1337 	/* EOP Event */
1338 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
1339 			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
1340 			      &adev->gfx.eop_irq);
1341 	if (r)
1342 		return r;
1343 
1344 	/* Privileged reg */
1345 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
1346 			      &adev->gfx.priv_reg_irq);
1347 	if (r)
1348 		return r;
1349 
1350 	/* Privileged inst */
1351 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
1352 			      &adev->gfx.priv_inst_irq);
1353 	if (r)
1354 		return r;
1355 
1356 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1357 
1358 	gfx_v10_0_scratch_init(adev);
1359 
1360 	r = gfx_v10_0_me_init(adev);
1361 	if (r)
1362 		return r;
1363 
1364 	r = gfx_v10_0_rlc_init(adev);
1365 	if (r) {
1366 		DRM_ERROR("Failed to init rlc BOs!\n");
1367 		return r;
1368 	}
1369 
1370 	r = gfx_v10_0_mec_init(adev);
1371 	if (r) {
1372 		DRM_ERROR("Failed to init MEC BOs!\n");
1373 		return r;
1374 	}
1375 
1376 	/* set up the gfx ring */
1377 	for (i = 0; i < adev->gfx.me.num_me; i++) {
1378 		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
1379 			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
1380 				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
1381 					continue;
1382 
1383 				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
1384 							    i, k, j);
1385 				if (r)
1386 					return r;
1387 				ring_id++;
1388 			}
1389 		}
1390 	}
1391 
1392 	ring_id = 0;
1393 	/* set up the compute queues - allocate horizontally across pipes */
1394 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1395 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1396 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1397 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
1398 								     j))
1399 					continue;
1400 
1401 				r = gfx_v10_0_compute_ring_init(adev, ring_id,
1402 								i, k, j);
1403 				if (r)
1404 					return r;
1405 
1406 				ring_id++;
1407 			}
1408 		}
1409 	}
1410 
1411 	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
1412 	if (r) {
1413 		DRM_ERROR("Failed to init KIQ BOs!\n");
1414 		return r;
1415 	}
1416 
1417 	kiq = &adev->gfx.kiq;
1418 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1419 	if (r)
1420 		return r;
1421 
1422 	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
1423 	if (r)
1424 		return r;
1425 
1426 	/* allocate visible FB for rlc auto-loading fw */
1427 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1428 		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
1429 		if (r)
1430 			return r;
1431 	}
1432 
1433 	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;
1434 
1435 	gfx_v10_0_gpu_early_init(adev);
1436 
1437 	return 0;
1438 }
1439 
1440 static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
1441 {
1442 	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1443 			      &adev->gfx.pfp.pfp_fw_gpu_addr,
1444 			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
1445 }
1446 
1447 static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
1448 {
1449 	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
1450 			      &adev->gfx.ce.ce_fw_gpu_addr,
1451 			      (void **)&adev->gfx.ce.ce_fw_ptr);
1452 }
1453 
1454 static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
1455 {
1456 	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1457 			      &adev->gfx.me.me_fw_gpu_addr,
1458 			      (void **)&adev->gfx.me.me_fw_ptr);
1459 }
1460 
1461 static int gfx_v10_0_sw_fini(void *handle)
1462 {
1463 	int i;
1464 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1465 
1466 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1467 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1468 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
1469 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1470 
1471 	amdgpu_gfx_mqd_sw_fini(adev);
1472 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
1473 	amdgpu_gfx_kiq_fini(adev);
1474 
1475 	gfx_v10_0_pfp_fini(adev);
1476 	gfx_v10_0_ce_fini(adev);
1477 	gfx_v10_0_me_fini(adev);
1478 	gfx_v10_0_rlc_fini(adev);
1479 	gfx_v10_0_mec_fini(adev);
1480 
1481 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1482 		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);
1483 
1484 	gfx_v10_0_free_microcode(adev);
1485 
1486 	return 0;
1487 }
1488 
1489 
1490 static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
1491 {
1492 	/* TODO */
1493 }
1494 
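/*
 * Steer register reads/writes to one shader engine / shader array /
 * instance via GRBM_GFX_INDEX; 0xffffffff in a field selects broadcast
 * to all units of that kind. Callers are expected to hold
 * adev->grbm_idx_mutex around select/restore pairs.
 */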
1495 static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1496 				   u32 sh_num, u32 instance)
1497 {
1498 	u32 data;
1499 
1500 	if (instance == 0xffffffff)
1501 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1502 				     INSTANCE_BROADCAST_WRITES, 1);
1503 	else
1504 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1505 				     instance);
1506 
1507 	if (se_num == 0xffffffff)
1508 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1509 				     1);
1510 	else
1511 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1512 
1513 	if (sh_num == 0xffffffff)
1514 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1515 				     1);
1516 	else
1517 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1518 
1519 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1520 }
1521 
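/*
 * Return the bitmap of render backends (RBs) active on the currently
 * selected SE/SA, derived from the harvest fuses (CC_RB_BACKEND_DISABLE)
 * and the user-disable register (GC_USER_RB_BACKEND_DISABLE).
 */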
1522 static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1523 {
1524 	u32 data, mask;
1525 
1526 	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1527 	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1528 
1529 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1530 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1531 
1532 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1533 					 adev->gfx.config.max_sh_per_se);
1534 
1535 	return (~data) & mask;
1536 }
1537 
1538 static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
1539 {
1540 	int i, j;
1541 	u32 data;
1542 	u32 active_rbs = 0;
1543 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1544 					adev->gfx.config.max_sh_per_se;
1545 
1546 	mutex_lock(&adev->grbm_idx_mutex);
1547 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1548 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1549 			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
1550 			data = gfx_v10_0_get_rb_active_bitmap(adev);
1551 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1552 					       rb_bitmap_width_per_sh);
1553 		}
1554 	}
1555 	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1556 	mutex_unlock(&adev->grbm_idx_mutex);
1557 
1558 	adev->gfx.config.backend_enable_mask = active_rbs;
1559 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1560 }
1561 
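/*
 * Build the PA_SC_TILE_STEERING_OVERRIDE value from the number of scan
 * converters, active RBs per SC and packers per SC. Each count is
 * encoded as log2, hence the order_base_2() calls below.
 */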
1562 static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
1563 {
1564 	uint32_t num_sc;
1565 	uint32_t enabled_rb_per_sh;
1566 	uint32_t active_rb_bitmap;
1567 	uint32_t num_rb_per_sc;
1568 	uint32_t num_packer_per_sc;
1569 	uint32_t pa_sc_tile_steering_override;
1570 
1571 	/* init num_sc */
1572 	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
1573 			adev->gfx.config.num_sc_per_sh;
1574 	/* init num_rb_per_sc */
1575 	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
1576 	enabled_rb_per_sh = hweight32(active_rb_bitmap);
1577 	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
1578 	/* init num_packer_per_sc */
1579 	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;
1580 
1581 	pa_sc_tile_steering_override = 0;
1582 	pa_sc_tile_steering_override |=
1583 		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
1584 		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
1585 	pa_sc_tile_steering_override |=
1586 		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
1587 		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
1588 	pa_sc_tile_steering_override |=
1589 		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
1590 		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;
1591 
1592 	return pa_sc_tile_steering_override;
1593 }
1594 
1595 #define DEFAULT_SH_MEM_BASES	(0x6000)
1596 #define FIRST_COMPUTE_VMID	(8)
1597 #define LAST_COMPUTE_VMID	(16)
1598 
1599 static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
1600 {
1601 	int i;
1602 	uint32_t sh_mem_bases;
1603 
1604 	/*
1605 	 * Configure apertures:
1606 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1607 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1608 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1609 	 */
1610 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1611 
1612 	mutex_lock(&adev->srbm_mutex);
1613 	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1614 		nv_grbm_select(adev, 0, 0, 0, i);
1615 		/* CP and shaders */
1616 		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1617 		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1618 	}
1619 	nv_grbm_select(adev, 0, 0, 0, 0);
1620 	mutex_unlock(&adev->srbm_mutex);
1621 
1622 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
1623 	   access. These should be enabled by FW for target VMIDs. */
1624 	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1625 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
1626 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
1627 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
1628 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
1629 	}
1630 }
1631 
1632 static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev)
1633 {
1634 	int vmid;
1635 
1636 	/*
1637 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1638 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1639 	 * the driver can enable them for graphics. VMID0 should maintain
1640 	 * access so that HWS firmware can save/restore entries.
1641 	 */
1642 	for (vmid = 1; vmid < 16; vmid++) {
1643 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
1644 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
1645 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
1646 		WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
1647 	}
1648 }
1649 
1650 
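/*
 * For each inactive WGP on every SE/SA, set the matching TCP (and SQC)
 * disable bits so that cache and UTCL0/UTCL1 invalidation requests are
 * not steered to harvested units. Each WGP accounts for two TCP
 * instances, hence the two-bit "3 << (2 * k)" masks below.
 */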
1651 static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
1652 {
1653 	int i, j, k;
1654 	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
1655 	u32 tmp, wgp_active_bitmap = 0;
1656 	u32 gcrd_targets_disable_tcp = 0;
1657 	u32 utcl_invreq_disable = 0;
1658 	/*
1659 	 * GCRD_TARGETS_DISABLE field contains
1660 	 * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
1661 	 * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0]
1662 	 */
1663 	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
1664 		2 * max_wgp_per_sh + /* TCP */
1665 		max_wgp_per_sh + /* SQC */
1666 		4); /* GL1C */
1667 	/*
1668 	 * UTCL1_UTCL0_INVREQ_DISABLE field contains
1669 	 * for Navi10/Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
1670 	 * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0]
1671 	 */
1672 	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
1673 		2 * max_wgp_per_sh + /* TCP */
1674 		2 * max_wgp_per_sh + /* SQC */
1675 		4 + /* RMI */
1676 		1); /* SQG */
1677 
1678 	if (adev->asic_type == CHIP_NAVI10 ||
1679 	    adev->asic_type == CHIP_NAVI14 ||
1680 	    adev->asic_type == CHIP_NAVI12) {
1681 		mutex_lock(&adev->grbm_idx_mutex);
1682 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1683 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1684 				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
1685 				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
1686 				/*
1687 				 * Set corresponding TCP bits for the inactive WGPs in
1688 				 * GCRD_SA_TARGETS_DISABLE
1689 				 */
1690 				gcrd_targets_disable_tcp = 0;
1691 				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
1692 				utcl_invreq_disable = 0;
1693 
1694 				for (k = 0; k < max_wgp_per_sh; k++) {
1695 					if (!(wgp_active_bitmap & (1 << k))) {
1696 						gcrd_targets_disable_tcp |= 3 << (2 * k);
1697 						utcl_invreq_disable |= (3 << (2 * k)) |
1698 							(3 << (2 * (max_wgp_per_sh + k)));
1699 					}
1700 				}
1701 
1702 				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
1703 				/* only override TCP & SQC bits */
1704 				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
1705 				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
1706 				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);
1707 
1708 				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
1709 				/* only override TCP bits */
1710 				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
1711 				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
1712 				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
1713 			}
1714 		}
1715 
1716 		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1717 		mutex_unlock(&adev->grbm_idx_mutex);
1718 	}
1719 }
1720 
1721 static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
1722 {
1723 	/* TCCs are global (not instanced). */
1724 	uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
1725 			       RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
1726 
1727 	adev->gfx.config.tcc_disabled_mask =
1728 		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1729 		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1730 }
1731 
1732 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
1733 {
1734 	u32 tmp;
1735 	int i;
1736 
1737 	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1738 
1739 	gfx_v10_0_tiling_mode_table_init(adev);
1740 
1741 	gfx_v10_0_setup_rb(adev);
1742 	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
1743 	gfx_v10_0_get_tcc_info(adev);
1744 	adev->gfx.config.pa_sc_tile_steering_override =
1745 		gfx_v10_0_init_pa_sc_tile_steering_override(adev);
1746 
1747 	/* XXX SH_MEM regs */
1748 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1749 	mutex_lock(&adev->srbm_mutex);
1750 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
1751 		nv_grbm_select(adev, 0, 0, 0, i);
1752 		/* CP and shaders */
1753 		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1754 		if (i != 0) {
1755 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1756 				(adev->gmc.private_aperture_start >> 48));
1757 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1758 				(adev->gmc.shared_aperture_start >> 48));
1759 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1760 		}
1761 	}
1762 	nv_grbm_select(adev, 0, 0, 0, 0);
1763 
1764 	mutex_unlock(&adev->srbm_mutex);
1765 
1766 	gfx_v10_0_init_compute_vmid(adev);
1767 	gfx_v10_0_init_gds_vmid(adev);
1769 }
1770 
1771 static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1772 					       bool enable)
1773 {
1774 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1775 
1776 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1777 			    enable ? 1 : 0);
1778 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1779 			    enable ? 1 : 0);
1780 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1781 			    enable ? 1 : 0);
1782 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1783 			    enable ? 1 : 0);
1784 
1785 	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1786 }
1787 
1788 static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
1789 {
1790 	/* csib */
1791 	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
1792 		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
1793 	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
1794 		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1795 	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1796 }
1797 
1798 static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
1799 {
1800 	int i;
1801 
1802 	gfx_v10_0_init_csb(adev);
1803 
1804 	for (i = 0; i < adev->num_vmhubs; i++)
1805 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
1806 
1807 	/* TODO: init power gating */
1808 	return;
1809 }
1810 
1811 void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
1812 {
1813 	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1814 
1815 	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1816 	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1817 }
1818 
1819 static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
1820 {
1821 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1822 	udelay(50);
1823 	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1824 	udelay(50);
1825 }
1826 
1827 static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1828 					     bool enable)
1829 {
1830 	uint32_t rlc_pg_cntl;
1831 
1832 	rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
1833 
1834 	if (!enable) {
1835 		/* RLC_PG_CNTL[23] = 0 (default)
1836 		 * RLC will wait for handshake acks with SMU
1837 		 * GFXOFF will be enabled
1838 		 * RLC_PG_CNTL[23] = 1
1839 		 * RLC will not issue any message to SMU
1840 		 * hence no handshake between SMU & RLC
1841 		 * GFXOFF will be disabled
1842 		 */
1843 		rlc_pg_cntl |= 0x800000;
1844 	} else {
1845 		rlc_pg_cntl &= ~0x800000;
	}
1846 	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
1847 }
1848 
1849 static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
1850 {
1851 	/* TODO: keep the rlc & smu handshake disabled until the smu
1852 	 * and gfxoff features work as expected */
1853 	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1854 		gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
1855 
1856 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1857 	udelay(50);
1858 }
1859 
1860 static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
1861 {
1862 	uint32_t tmp;
1863 
1864 	/* enable Save Restore Machine */
1865 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1866 	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1867 	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1868 	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1869 }
1870 
1871 static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
1872 {
1873 	const struct rlc_firmware_header_v2_0 *hdr;
1874 	const __le32 *fw_data;
1875 	unsigned i, fw_size;
1876 
1877 	if (!adev->gfx.rlc_fw)
1878 		return -EINVAL;
1879 
1880 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1881 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1882 
1883 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1884 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1885 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1886 
1887 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
1888 		     RLCG_UCODE_LOADING_START_ADDRESS);
1889 
1890 	for (i = 0; i < fw_size; i++)
1891 		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
1892 			     le32_to_cpup(fw_data++));
1893 
1894 	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1895 
1896 	return 0;
1897 }
1898 
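/*
 * Bring the RLC up for one of three firmware load paths: PSP front-door
 * loading (wait for autoload to complete, then enable SRM), legacy
 * direct register loading, or RLC backdoor autoload.
 */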
1899 static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
1900 {
1901 	int r;
1902 
1903 	if (amdgpu_sriov_vf(adev))
1904 		return 0;
1905 
1906 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1907 		r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1908 		if (r)
1909 			return r;
1910 		gfx_v10_0_init_pg(adev);
1911 
1912 		/* enable RLC SRM */
1913 		gfx_v10_0_rlc_enable_srm(adev);
1914 
1915 	} else {
1916 		adev->gfx.rlc.funcs->stop(adev);
1917 
1918 		/* disable CG */
1919 		WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
1920 
1921 		/* disable PG */
1922 		WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
1923 
1924 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1925 			/* legacy rlc firmware loading */
1926 			r = gfx_v10_0_rlc_load_microcode(adev);
1927 			if (r)
1928 				return r;
1929 		} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1930 			/* rlc backdoor autoload firmware */
1931 			r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
1932 			if (r)
1933 				return r;
1934 		}
1935 
1936 		gfx_v10_0_init_pg(adev);
1937 		adev->gfx.rlc.funcs->start(adev);
1938 
1939 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1940 			r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1941 			if (r)
1942 				return r;
1943 		}
1944 	}
1945 	return 0;
1946 }
1947 
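/*
 * Cached copy of the RLC table of contents, indexed by firmware ID.
 * Offsets and sizes are converted to bytes when the TOC is parsed.
 */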
1948 static struct {
1949 	FIRMWARE_ID	id;
1950 	unsigned int	offset;
1951 	unsigned int	size;
1952 } rlc_autoload_info[FIRMWARE_ID_MAX];
1953 
1954 static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
1955 {
1956 	int ret;
1957 	RLC_TABLE_OF_CONTENT *rlc_toc;
1958 
1959 	ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
1960 					AMDGPU_GEM_DOMAIN_GTT,
1961 					&adev->gfx.rlc.rlc_toc_bo,
1962 					&adev->gfx.rlc.rlc_toc_gpu_addr,
1963 					(void **)&adev->gfx.rlc.rlc_toc_buf);
1964 	if (ret) {
1965 		dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
1966 		return ret;
1967 	}
1968 
1969 	/* Copy toc from psp sos fw to rlc toc buffer */
1970 	memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
1971 
1972 	rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
1973 	while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
1974 		(rlc_toc->id < FIRMWARE_ID_MAX)) {
1975 		if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
1976 		    (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
1977 			/* Offset needs 4KB alignment */
1978 			rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
1979 		}
1980 
1981 		rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
1982 		rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
1983 		rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
1984 
1985 		rlc_toc++;
1986 	}
1987 
1988 	return 0;
1989 }
1990 
1991 static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
1992 {
1993 	uint32_t total_size = 0;
1994 	FIRMWARE_ID id;
1995 	int ret;
1996 
1997 	ret = gfx_v10_0_parse_rlc_toc(adev);
1998 	if (ret) {
1999 		dev_err(adev->dev, "failed to parse rlc toc\n");
2000 		return 0;
2001 	}
2002 
2003 	for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
2004 		total_size += rlc_autoload_info[id].size;
2005 
2006 	/* In case the offsets in the rlc toc are page aligned, the summed
	 * sizes may undershoot; use the end of the last entry instead. */
2007 	if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
2008 		total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
2009 				rlc_autoload_info[FIRMWARE_ID_MAX-1].size;
2010 
2011 	return total_size;
2012 }
2013 
2014 static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev)
2015 {
2016 	int r;
2017 	uint32_t total_size;
2018 
2019 	total_size = gfx_v10_0_calc_toc_total_size(adev);
2020 
2021 	r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE,
2022 				      AMDGPU_GEM_DOMAIN_GTT,
2023 				      &adev->gfx.rlc.rlc_autoload_bo,
2024 				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
2025 				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
2026 	if (r) {
2027 		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
2028 		return r;
2029 	}
2030 
2031 	return 0;
2032 }
2033 
2034 static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev)
2035 {
2036 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo,
2037 			      &adev->gfx.rlc.rlc_toc_gpu_addr,
2038 			      (void **)&adev->gfx.rlc.rlc_toc_buf);
2039 	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
2040 			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
2041 			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
2042 }
2043 
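/*
 * Copy one firmware image into its TOC-assigned slot in the autoload
 * buffer, truncating to the slot size if needed and zero-filling any
 * remainder of the slot.
 */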
2044 static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
2045 						       FIRMWARE_ID id,
2046 						       const void *fw_data,
2047 						       uint32_t fw_size)
2048 {
2049 	uint32_t toc_offset;
2050 	uint32_t toc_fw_size;
2051 	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;
2052 
2053 	if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX)
2054 		return;
2055 
2056 	toc_offset = rlc_autoload_info[id].offset;
2057 	toc_fw_size = rlc_autoload_info[id].size;
2058 
2059 	if (fw_size == 0)
2060 		fw_size = toc_fw_size;
2061 
2062 	if (fw_size > toc_fw_size)
2063 		fw_size = toc_fw_size;
2064 
2065 	memcpy(ptr + toc_offset, fw_data, fw_size);
2066 
2067 	if (fw_size < toc_fw_size)
2068 		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
2069 }
2070 
2071 static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
2072 {
2073 	void *data;
2074 	uint32_t size;
2075 
2076 	data = adev->gfx.rlc.rlc_toc_buf;
2077 	size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size;
2078 
2079 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2080 						   FIRMWARE_ID_RLC_TOC,
2081 						   data, size);
2082 }
2083 
2084 static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
2085 {
2086 	const __le32 *fw_data;
2087 	uint32_t fw_size;
2088 	const struct gfx_firmware_header_v1_0 *cp_hdr;
2089 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
2090 
2091 	/* pfp ucode */
2092 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2093 		adev->gfx.pfp_fw->data;
2094 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2095 		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2096 	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2097 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2098 						   FIRMWARE_ID_CP_PFP,
2099 						   fw_data, fw_size);
2100 
2101 	/* ce ucode */
2102 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2103 		adev->gfx.ce_fw->data;
2104 	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2105 		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2106 	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2107 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2108 						   FIRMWARE_ID_CP_CE,
2109 						   fw_data, fw_size);
2110 
2111 	/* me ucode */
2112 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2113 		adev->gfx.me_fw->data;
2114 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2115 		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2116 	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
2117 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2118 						   FIRMWARE_ID_CP_ME,
2119 						   fw_data, fw_size);
2120 
2121 	/* rlc ucode */
2122 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
2123 		adev->gfx.rlc_fw->data;
2124 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2125 		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
2126 	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
2127 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2128 						   FIRMWARE_ID_RLC_G_UCODE,
2129 						   fw_data, fw_size);
2130 
2131 	/* mec1 ucode */
2132 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)
2133 		adev->gfx.mec_fw->data;
2134 	fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
2135 		le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
2136 	fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
2137 		cp_hdr->jt_size * 4;
2138 	gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2139 						   FIRMWARE_ID_CP_MEC,
2140 						   fw_data, fw_size);
2141 	/* Loading mec2 ucode is not necessary if it is identical to mec1 */
2142 }
2143 
2144 /* Temporarily put sdma part here */
2145 static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
2146 {
2147 	const __le32 *fw_data;
2148 	uint32_t fw_size;
2149 	const struct sdma_firmware_header_v1_0 *sdma_hdr;
2150 	int i;
2151 
2152 	for (i = 0; i < adev->sdma.num_instances; i++) {
2153 		sdma_hdr = (const struct sdma_firmware_header_v1_0 *)
2154 			adev->sdma.instance[i].fw->data;
2155 		fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
2156 			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
2157 		fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes);
2158 
2159 		if (i == 0) {
2160 			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2161 				FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size);
2162 			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2163 				FIRMWARE_ID_SDMA0_JT,
2164 				(uint32_t *)fw_data +
2165 				sdma_hdr->jt_offset,
2166 				sdma_hdr->jt_size * 4);
2167 		} else if (i == 1) {
2168 			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2169 				FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size);
2170 			gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev,
2171 				FIRMWARE_ID_SDMA1_JT,
2172 				(uint32_t *)fw_data +
2173 				sdma_hdr->jt_offset,
2174 				sdma_hdr->jt_size * 4);
2175 		}
2176 	}
2177 }
2178 
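/*
 * Stage every firmware image in the autoload buffer and point the RLC
 * bootloader at the RLC_G ucode so the RLC can load the remaining
 * firmwares itself. The RLC ROM must already have halted with a
 * cold-boot or VDDGFX exit status, which is sanity-checked below.
 */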
2179 static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
2180 {
2181 	uint32_t rlc_g_offset, rlc_g_size, tmp;
2182 	uint64_t gpu_addr;
2183 
2184 	gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev);
2185 	gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
2186 	gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
2187 
2188 	rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset;
2189 	rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size;
2190 	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;
2191 
2192 	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr));
2193 	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr));
2194 	WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size);
2195 
2196 	tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR);
2197 	if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK |
2198 		   RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) {
2199 		DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n");
2200 		return -EINVAL;
2201 	}
2202 
2203 	tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2204 	if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
2205 		DRM_ERROR("RLC ROM should halt itself\n");
2206 		return -EINVAL;
2207 	}
2208 
2209 	return 0;
2210 }
2211 
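/*
 * The four *_config_*_cache() helpers below share one pattern:
 * invalidate the engine's L1 instruction cache, poll for completion,
 * then point the cache base address at the engine's ucode inside the
 * autoload buffer.
 */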
2212 static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
2213 {
2214 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2215 	uint32_t tmp;
2216 	int i;
2217 	uint64_t addr;
2218 
2219 	/* Trigger an invalidation of the L1 instruction caches */
2220 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2221 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2222 	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2223 
2224 	/* Wait for invalidation complete */
2225 	for (i = 0; i < usec_timeout; i++) {
2226 		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2227 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2228 			INVALIDATE_CACHE_COMPLETE))
2229 			break;
2230 		udelay(1);
2231 	}
2232 
2233 	if (i >= usec_timeout) {
2234 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2235 		return -EINVAL;
2236 	}
2237 
2238 	/* Program me ucode address into instruction cache address register */
2239 	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2240 		rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
2241 	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2242 			lower_32_bits(addr) & 0xFFFFF000);
2243 	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2244 			upper_32_bits(addr));
2245 
2246 	return 0;
2247 }
2248 
2249 static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
2250 {
2251 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2252 	uint32_t tmp;
2253 	int i;
2254 	uint64_t addr;
2255 
2256 	/* Trigger an invalidation of the L1 instruction caches */
2257 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2258 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2259 	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2260 
2261 	/* Wait for invalidation complete */
2262 	for (i = 0; i < usec_timeout; i++) {
2263 		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2264 		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2265 			INVALIDATE_CACHE_COMPLETE))
2266 			break;
2267 		udelay(1);
2268 	}
2269 
2270 	if (i >= usec_timeout) {
2271 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2272 		return -EINVAL;
2273 	}
2274 
2275 	/* Program ce ucode address into instruction cache address register */
2276 	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2277 		rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
2278 	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2279 			lower_32_bits(addr) & 0xFFFFF000);
2280 	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2281 			upper_32_bits(addr));
2282 
2283 	return 0;
2284 }
2285 
2286 static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
2287 {
2288 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2289 	uint32_t tmp;
2290 	int i;
2291 	uint64_t addr;
2292 
2293 	/* Trigger an invalidation of the L1 instruction caches */
2294 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2295 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2296 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2297 
2298 	/* Wait for invalidation complete */
2299 	for (i = 0; i < usec_timeout; i++) {
2300 		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2301 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2302 			INVALIDATE_CACHE_COMPLETE))
2303 			break;
2304 		udelay(1);
2305 	}
2306 
2307 	if (i >= usec_timeout) {
2308 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2309 		return -EINVAL;
2310 	}
2311 
2312 	/* Program pfp ucode address into instruction cache address register */
2313 	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2314 		rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
2315 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2316 			lower_32_bits(addr) & 0xFFFFF000);
2317 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2318 			upper_32_bits(addr));
2319 
2320 	return 0;
2321 }
2322 
2323 static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
2324 {
2325 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2326 	uint32_t tmp;
2327 	int i;
2328 	uint64_t addr;
2329 
2330 	/* Trigger an invalidation of the L1 instruction caches */
2331 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2332 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2333 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2334 
2335 	/* Wait for invalidation complete */
2336 	for (i = 0; i < usec_timeout; i++) {
2337 		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2338 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2339 			INVALIDATE_CACHE_COMPLETE))
2340 			break;
2341 		udelay(1);
2342 	}
2343 
2344 	if (i >= usec_timeout) {
2345 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2346 		return -EINVAL;
2347 	}
2348 
2349 	/* Program mec1 ucode address into instruction cache address register */
2350 	addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2351 		rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
2352 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2353 			lower_32_bits(addr) & 0xFFFFF000);
2354 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2355 			upper_32_bits(addr));
2356 
2357 	return 0;
2358 }
2359 
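/*
 * Poll until the CP is idle and the RLC reports BOOTLOAD_COMPLETE; for
 * the backdoor autoload path, also program each CP engine's instruction
 * cache afterwards.
 */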
2360 static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2361 {
2362 	uint32_t cp_status;
2363 	uint32_t bootload_status;
2364 	int i, r;
2365 
2366 	for (i = 0; i < adev->usec_timeout; i++) {
2367 		cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
2368 		bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
2369 		if ((cp_status == 0) &&
2370 		    (REG_GET_FIELD(bootload_status,
2371 			RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2372 			break;
2373 		}
2374 		udelay(1);
2375 	}
2376 
2377 	if (i >= adev->usec_timeout) {
2378 		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2379 		return -ETIMEDOUT;
2380 	}
2381 
2382 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2383 		r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
2384 		if (r)
2385 			return r;
2386 
2387 		r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
2388 		if (r)
2389 			return r;
2390 
2391 		r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
2392 		if (r)
2393 			return r;
2394 
2395 		r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
2396 		if (r)
2397 			return r;
2398 	}
2399 
2400 	return 0;
2401 }
2402 
2403 static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2404 {
2405 	int i;
2406 	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2407 
2408 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2409 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2410 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2411 	if (!enable) {
2412 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2413 			adev->gfx.gfx_ring[i].sched.ready = false;
2414 	}
2415 	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2416 
2417 	for (i = 0; i < adev->usec_timeout; i++) {
2418 		if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
2419 			break;
2420 		udelay(1);
2421 	}
2422 
2423 	if (i >= adev->usec_timeout)
2424 		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2425 
2426 	return 0;
2427 }
2428 
2429 static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2430 {
2431 	int r;
2432 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2433 	const __le32 *fw_data;
2434 	unsigned i, fw_size;
2435 	uint32_t tmp;
2436 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2437 
2438 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2439 		adev->gfx.pfp_fw->data;
2440 
2441 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2442 
2443 	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2444 		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2445 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2446 
2447 	r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2448 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2449 				      &adev->gfx.pfp.pfp_fw_obj,
2450 				      &adev->gfx.pfp.pfp_fw_gpu_addr,
2451 				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
2452 	if (r) {
2453 		dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2454 		gfx_v10_0_pfp_fini(adev);
2455 		return r;
2456 	}
2457 
2458 	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2459 
2460 	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2461 	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2462 
2463 	/* Trigger an invalidation of the L1 instruction caches */
2464 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2465 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2466 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2467 
2468 	/* Wait for invalidation complete */
2469 	for (i = 0; i < usec_timeout; i++) {
2470 		tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2471 		if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2472 			INVALIDATE_CACHE_COMPLETE))
2473 			break;
2474 		udelay(1);
2475 	}
2476 
2477 	if (i >= usec_timeout) {
2478 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2479 		return -EINVAL;
2480 	}
2481 
2482 	if (amdgpu_emu_mode == 1)
2483 		adev->nbio.funcs->hdp_flush(adev, NULL);
2484 
2485 	tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
2486 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2487 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2488 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2489 	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2490 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp);
2491 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2492 		adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000);
2493 	WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2494 		upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2495 
2496 	return 0;
2497 }
2498 
2499 static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
2500 {
2501 	int r;
2502 	const struct gfx_firmware_header_v1_0 *ce_hdr;
2503 	const __le32 *fw_data;
2504 	unsigned i, fw_size;
2505 	uint32_t tmp;
2506 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2507 
2508 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2509 		adev->gfx.ce_fw->data;
2510 
2511 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2512 
2513 	fw_data = (const __le32 *)(adev->gfx.ce_fw->data +
2514 		le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2515 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes);
2516 
2517 	r = amdgpu_bo_create_reserved(adev, ce_hdr->header.ucode_size_bytes,
2518 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2519 				      &adev->gfx.ce.ce_fw_obj,
2520 				      &adev->gfx.ce.ce_fw_gpu_addr,
2521 				      (void **)&adev->gfx.ce.ce_fw_ptr);
2522 	if (r) {
2523 		dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r);
2524 		gfx_v10_0_ce_fini(adev);
2525 		return r;
2526 	}
2527 
2528 	memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);
2529 
2530 	amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
2531 	amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);
2532 
2533 	/* Trigger an invalidation of the L1 instruction caches */
2534 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2535 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2536 	WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2537 
2538 	/* Wait for invalidation complete */
2539 	for (i = 0; i < usec_timeout; i++) {
2540 		tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2541 		if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2542 			INVALIDATE_CACHE_COMPLETE))
2543 			break;
2544 		udelay(1);
2545 	}
2546 
2547 	if (i >= usec_timeout) {
2548 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2549 		return -EINVAL;
2550 	}
2551 
2552 	if (amdgpu_emu_mode == 1)
2553 		adev->nbio.funcs->hdp_flush(adev, NULL);
2554 
2555 	tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
2556 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
2557 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0);
2558 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0);
2559 	tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	/* write the updated cache control back, as the pfp path does */
	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL, tmp);
2560 	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2561 		adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
2562 	WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2563 		upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
2564 
2565 	return 0;
2566 }
2567 
2568 static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2569 {
2570 	int r;
2571 	const struct gfx_firmware_header_v1_0 *me_hdr;
2572 	const __le32 *fw_data;
2573 	unsigned i, fw_size;
2574 	uint32_t tmp;
2575 	uint32_t usec_timeout = 50000;  /* wait for 50ms */
2576 
2577 	me_hdr = (const struct gfx_firmware_header_v1_0 *)
2578 		adev->gfx.me_fw->data;
2579 
2580 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2581 
2582 	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2583 		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2584 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2585 
2586 	r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2587 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2588 				      &adev->gfx.me.me_fw_obj,
2589 				      &adev->gfx.me.me_fw_gpu_addr,
2590 				      (void **)&adev->gfx.me.me_fw_ptr);
2591 	if (r) {
2592 		dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2593 		gfx_v10_0_me_fini(adev);
2594 		return r;
2595 	}
2596 
2597 	memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2598 
2599 	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2600 	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2601 
2602 	/* Trigger an invalidation of the L1 instruction caches */
2603 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2604 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2605 	WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
2606 
2607 	/* Wait for invalidation complete */
2608 	for (i = 0; i < usec_timeout; i++) {
2609 		tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
2610 		if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2611 			INVALIDATE_CACHE_COMPLETE))
2612 			break;
2613 		udelay(1);
2614 	}
2615 
2616 	if (i >= usec_timeout) {
2617 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2618 		return -EINVAL;
2619 	}
2620 
2621 	if (amdgpu_emu_mode == 1)
2622 		adev->nbio.funcs->hdp_flush(adev, NULL);
2623 
2624 	tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
2625 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2626 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2627 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2628 	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
	/* write the updated cache control back, as the pfp path does */
	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL, tmp);
2629 	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2630 		adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000);
2631 	WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2632 		upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2633 
2634 	return 0;
2635 }
2636 
2637 static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2638 {
2639 	int r;
2640 
2641 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2642 		return -EINVAL;
2643 
2644 	gfx_v10_0_cp_gfx_enable(adev, false);
2645 
2646 	r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev);
2647 	if (r) {
2648 		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
2649 		return r;
2650 	}
2651 
2652 	r = gfx_v10_0_cp_gfx_load_ce_microcode(adev);
2653 	if (r) {
2654 		dev_err(adev->dev, "(%d) failed to load ce fw\n", r);
2655 		return r;
2656 	}
2657 
2658 	r = gfx_v10_0_cp_gfx_load_me_microcode(adev);
2659 	if (r) {
2660 		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
2661 		return r;
2662 	}
2663 
2664 	return 0;
2665 }
2666 
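/*
 * Initialize the gfx CP state: unhalt the micro engines and emit the
 * clear-state preamble on ring 0 (golden context registers plus the
 * PA_SC_TILE_STEERING_OVERRIDE computed earlier); ring 1 then only
 * needs a CLEAR_STATE packet to copy state 0.
 */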
2667 static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
2668 {
2669 	struct amdgpu_ring *ring;
2670 	const struct cs_section_def *sect = NULL;
2671 	const struct cs_extent_def *ext = NULL;
2672 	int r, i;
2673 	int ctx_reg_offset;
2674 
2675 	/* init the CP */
2676 	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT,
2677 		     adev->gfx.config.max_hw_contexts - 1);
2678 	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2679 
2680 	gfx_v10_0_cp_gfx_enable(adev, true);
2681 
2682 	ring = &adev->gfx.gfx_ring[0];
2683 	r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
2684 	if (r) {
2685 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2686 		return r;
2687 	}
2688 
2689 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2690 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2691 
2692 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2693 	amdgpu_ring_write(ring, 0x80000000);
2694 	amdgpu_ring_write(ring, 0x80000000);
2695 
2696 	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
2697 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2698 			if (sect->id == SECT_CONTEXT) {
2699 				amdgpu_ring_write(ring,
2700 						  PACKET3(PACKET3_SET_CONTEXT_REG,
2701 							  ext->reg_count));
2702 				amdgpu_ring_write(ring, ext->reg_index -
2703 						  PACKET3_SET_CONTEXT_REG_START);
2704 				for (i = 0; i < ext->reg_count; i++)
2705 					amdgpu_ring_write(ring, ext->extent[i]);
2706 			}
2707 		}
2708 	}
2709 
2710 	ctx_reg_offset =
2711 		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
2712 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
2713 	amdgpu_ring_write(ring, ctx_reg_offset);
2714 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
2715 
2716 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2717 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2718 
2719 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2720 	amdgpu_ring_write(ring, 0);
2721 
2722 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2723 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2724 	amdgpu_ring_write(ring, 0x8000);
2725 	amdgpu_ring_write(ring, 0x8000);
2726 
2727 	amdgpu_ring_commit(ring);
2728 
2729 	/* submit cs packet to copy state 0 to next available state */
2730 	ring = &adev->gfx.gfx_ring[1];
2731 	r = amdgpu_ring_alloc(ring, 2);
2732 	if (r) {
2733 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2734 		return r;
2735 	}
2736 
2737 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2738 	amdgpu_ring_write(ring, 0);
2739 
2740 	amdgpu_ring_commit(ring);
2741 
2742 	return 0;
2743 }
2744 
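/*
 * Select which gfx pipe subsequent CP_RB* register accesses target.
 * Callers take adev->srbm_mutex around the switch.
 */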
2745 static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2746 					 CP_PIPE_ID pipe)
2747 {
2748 	u32 tmp;
2749 
2750 	tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
2751 	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2752 
2753 	WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
2754 }
2755 
2756 static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2757 					  struct amdgpu_ring *ring)
2758 {
2759 	u32 tmp;
2760 
2761 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2762 	if (ring->use_doorbell) {
2763 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2764 				    DOORBELL_OFFSET, ring->doorbell_index);
2765 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2766 				    DOORBELL_EN, 1);
2767 	} else {
2768 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2769 				    DOORBELL_EN, 0);
2770 	}
2771 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2772 	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2773 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
2774 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2775 
2776 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2777 		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2778 }
2779 
2780 static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
2781 {
2782 	struct amdgpu_ring *ring;
2783 	u32 tmp;
2784 	u32 rb_bufsz;
2785 	u64 rb_addr, rptr_addr, wptr_gpu_addr;
2786 	u32 i;
	int r;
2787 
2788 	/* Set the write pointer delay */
2789 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2790 
2791 	/* set the RB to use vmid 0 */
2792 	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2793 
2794 	/* Init gfx ring 0 for pipe 0 */
2795 	mutex_lock(&adev->srbm_mutex);
2796 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2797 	mutex_unlock(&adev->srbm_mutex);
2798 	/* Set ring buffer size */
2799 	ring = &adev->gfx.gfx_ring[0];
2800 	rb_bufsz = order_base_2(ring->ring_size / 8);
2801 	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2802 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2803 #ifdef __BIG_ENDIAN
2804 	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2805 #endif
2806 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2807 
2808 	/* Initialize the ring buffer's write pointers */
2809 	ring->wptr = 0;
2810 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2811 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2812 
2813 	/* set the wb address whether it's enabled or not */
2814 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2815 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2816 	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2817 		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2818 
2819 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2820 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2821 		     lower_32_bits(wptr_gpu_addr));
2822 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2823 		     upper_32_bits(wptr_gpu_addr));
2824 
2825 	mdelay(1);
2826 	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2827 
2828 	rb_addr = ring->gpu_addr >> 8;
2829 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2830 	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2831 
2832 	WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
2833 
2834 	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2835 
2836 	/* Init gfx ring 1 for pipe 1 */
2837 	mutex_lock(&adev->srbm_mutex);
2838 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
2839 	mutex_unlock(&adev->srbm_mutex);
2840 	ring = &adev->gfx.gfx_ring[1];
2841 	rb_bufsz = order_base_2(ring->ring_size / 8);
2842 	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
2843 	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
2844 	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2845 	/* Initialize the ring buffer's write pointers */
2846 	ring->wptr = 0;
2847 	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2848 	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
2849 	/* Set the wb address whether it's enabled or not */
2850 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2851 	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2852 	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2853 		CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2854 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2855 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2856 		lower_32_bits(wptr_gpu_addr));
2857 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2858 		upper_32_bits(wptr_gpu_addr));
2859 
2860 	mdelay(1);
2861 	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2862 
2863 	rb_addr = ring->gpu_addr >> 8;
2864 	WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
2865 	WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
2866 	WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
2867 
2868 	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2869 
2870 	/* Switch to pipe 0 */
2871 	mutex_lock(&adev->srbm_mutex);
2872 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2873 	mutex_unlock(&adev->srbm_mutex);
2874 
2875 	/* start the ring */
2876 	r = gfx_v10_0_cp_gfx_start(adev);
	if (r)
		return r;
2877 
2878 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2879 		ring = &adev->gfx.gfx_ring[i];
2880 		ring->sched.ready = true;
2881 	}
2882 
2883 	return 0;
2884 }
2885 
2886 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2887 {
2888 	int i;
2889 
2890 	if (enable) {
2891 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2892 	} else {
2893 		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2894 			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2895 			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2896 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2897 			adev->gfx.compute_ring[i].sched.ready = false;
2898 		adev->gfx.kiq.ring.sched.ready = false;
2899 	}
2900 	udelay(50);
2901 }
2902 
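/*
 * Direct (non-PSP) load of the MEC1 firmware: halt the compute micro
 * engines, invalidate the CPC instruction cache, point it at the MEC
 * ucode, then write the jump table through CP_MEC_ME1_UCODE_ADDR/DATA.
 */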
2903 static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2904 {
2905 	const struct gfx_firmware_header_v1_0 *mec_hdr;
2906 	const __le32 *fw_data;
2907 	unsigned i;
2908 	u32 tmp;
2909 	u32 usec_timeout = 50000; /* Wait for 50 ms */
2910 
2911 	if (!adev->gfx.mec_fw)
2912 		return -EINVAL;
2913 
2914 	gfx_v10_0_cp_compute_enable(adev, false);
2915 
2916 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2917 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2918 
2919 	fw_data = (const __le32 *)
2920 		(adev->gfx.mec_fw->data +
2921 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2922 
2923 	/* Trigger an invalidation of the L1 instruction caches */
2924 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2925 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2926 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2927 
2928 	/* Wait for invalidation complete */
2929 	for (i = 0; i < usec_timeout; i++) {
2930 		tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2931 		if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2932 				       INVALIDATE_CACHE_COMPLETE))
2933 			break;
2934 		udelay(1);
2935 	}
2936 
2937 	if (i >= usec_timeout) {
2938 		dev_err(adev->dev, "failed to invalidate instruction cache\n");
2939 		return -EINVAL;
2940 	}
2941 
2942 	if (amdgpu_emu_mode == 1)
2943 		adev->nbio.funcs->hdp_flush(adev, NULL);
2944 
2945 	tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
2946 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2947 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2948 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2949 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2950 
2951 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr &
2952 		     0xFFFFF000);
2953 	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2954 		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2955 
2956 	/* MEC1 */
2957 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0);
2958 
2959 	for (i = 0; i < mec_hdr->jt_size; i++)
2960 		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2961 			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2962 
2963 	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
2964 
2965 	/*
2966 	 * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
2967 	 * different microcode than MEC1.
2968 	 */
2969 
2970 	return 0;
2971 }
2972 
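/*
 * Tell the RLC which hardware queue is the KIQ: the low byte of
 * RLC_CP_SCHEDULERS encodes me/pipe/queue, and bit 7 appears to act as
 * the queue-valid flag, set with a second write.
 */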
2973 static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
2974 {
2975 	uint32_t tmp;
2976 	struct amdgpu_device *adev = ring->adev;
2977 
2978 	/* tell RLC which is KIQ queue */
2979 	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2980 	tmp &= 0xffffff00;
2981 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2982 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2983 	tmp |= 0x80;
2984 	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2985 }
2986 
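/*
 * Fill the memory queue descriptor (MQD) for a gfx ring. The MQD
 * mirrors the CP_GFX_HQD and CP_RB register state so the queue can be
 * (re)programmed from memory rather than via direct register writes.
 */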
2987 static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
2988 {
2989 	struct amdgpu_device *adev = ring->adev;
2990 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2991 	uint64_t hqd_gpu_addr, wb_gpu_addr;
2992 	uint32_t tmp;
2993 	uint32_t rb_bufsz;
2994 
2995 	/* set up gfx hqd wptr */
2996 	mqd->cp_gfx_hqd_wptr = 0;
2997 	mqd->cp_gfx_hqd_wptr_hi = 0;
2998 
2999 	/* set the pointer to the MQD */
3000 	mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
3001 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3002 
3003 	/* set up mqd control */
3004 	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
3005 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3006 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3007 	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3008 	mqd->cp_gfx_mqd_control = tmp;
3009 
3010 	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3011 	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
3012 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3013 	mqd->cp_gfx_hqd_vmid = 0;
3014 
3015 	/* set up default queue priority level
3016 	 * 0x0 = low priority, 0x1 = high priority */
3017 	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
3018 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3019 	mqd->cp_gfx_hqd_queue_priority = tmp;
3020 
3021 	/* set up time quantum */
3022 	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
3023 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3024 	mqd->cp_gfx_hqd_quantum = tmp;
3025 
3026 	/* set up gfx hqd base. this is similar to CP_RB_BASE */
3027 	hqd_gpu_addr = ring->gpu_addr >> 8;
3028 	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3029 	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3030 
3031 	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3032 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3033 	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3034 	mqd->cp_gfx_hqd_rptr_addr_hi =
3035 		upper_32_bits(wb_gpu_addr) & 0xffff;
3036 
3037 	/* set up rb_wptr_poll addr */
3038 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3039 	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3040 	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3041 
3042 	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3043 	rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
3044 	tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
3045 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3046 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
3047 #ifdef __BIG_ENDIAN
3048 	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
3049 #endif
3050 	mqd->cp_gfx_hqd_cntl = tmp;
3051 
3052 	/* set up cp_doorbell_control */
3053 	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3054 	if (ring->use_doorbell) {
3055 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3056 				    DOORBELL_OFFSET, ring->doorbell_index);
3057 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3058 				    DOORBELL_EN, 1);
3059 	} else {
3060 		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3061 				    DOORBELL_EN, 0);
	}
3062 	mqd->cp_rb_doorbell_control = tmp;
3063 
3064 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3065 	ring->wptr = 0;
3066 	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
3067 
	/* activate the queue */
3069 	mqd->cp_gfx_hqd_active = 1;
3070 
3071 	return 0;
3072 }
3073 
3074 #ifdef BRING_UP_DEBUG
3075 static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
3076 {
3077 	struct amdgpu_device *adev = ring->adev;
3078 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3079 
3080 	/* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3081 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3082 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3083 
3084 	/* set GFX_MQD_BASE */
3085 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3086 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3087 
3088 	/* set GFX_MQD_CONTROL */
3089 	WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3090 
3091 	/* set GFX_HQD_VMID to 0 */
3092 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3093 
3094 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
3095 			mqd->cp_gfx_hqd_queue_priority);
3096 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3097 
3098 	/* set GFX_HQD_BASE, similar as CP_RB_BASE */
3099 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3100 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3101 
3102 	/* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3103 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3104 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3105 
3106 	/* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3107 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3108 
3109 	/* set RB_WPTR_POLL_ADDR */
3110 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3111 	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3112 
3113 	/* set RB_DOORBELL_CONTROL */
3114 	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
3115 
	/* activate the queue */
3117 	WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
3118 
3119 	return 0;
3120 }
3121 #endif
3122 
3123 static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
3124 {
3125 	struct amdgpu_device *adev = ring->adev;
3126 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
3127 	int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3128 
3129 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3130 		memset((void *)mqd, 0, sizeof(*mqd));
3131 		mutex_lock(&adev->srbm_mutex);
3132 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3133 		gfx_v10_0_gfx_mqd_init(ring);
3134 #ifdef BRING_UP_DEBUG
3135 		gfx_v10_0_gfx_queue_init_register(ring);
3136 #endif
3137 		nv_grbm_select(adev, 0, 0, 0, 0);
3138 		mutex_unlock(&adev->srbm_mutex);
3139 		if (adev->gfx.me.mqd_backup[mqd_idx])
3140 			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3141 	} else if (adev->in_gpu_reset) {
3142 		/* reset mqd with the backup copy */
3143 		if (adev->gfx.me.mqd_backup[mqd_idx])
3144 			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3145 		/* reset the ring */
3146 		ring->wptr = 0;
3147 		adev->wb.wb[ring->wptr_offs] = 0;
3148 		amdgpu_ring_clear_ring(ring);
3149 #ifdef BRING_UP_DEBUG
3150 		mutex_lock(&adev->srbm_mutex);
3151 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3152 		gfx_v10_0_gfx_queue_init_register(ring);
3153 		nv_grbm_select(adev, 0, 0, 0, 0);
3154 		mutex_unlock(&adev->srbm_mutex);
3155 #endif
3156 	} else {
3157 		amdgpu_ring_clear_ring(ring);
3158 	}
3159 
3160 	return 0;
3161 }
3162 
3163 #ifndef BRING_UP_DEBUG
3164 static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
3165 {
3166 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3167 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3168 	int r, i;
3169 
3170 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
3171 		return -EINVAL;
3172 
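	/* reserve enough ring space for one MAP_QUEUES packet per gfx ring */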
3173 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
3174 					adev->gfx.num_gfx_rings);
3175 	if (r) {
3176 		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3177 		return r;
3178 	}
3179 
3180 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3181 		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
3182 
3183 	r = amdgpu_ring_test_ring(kiq_ring);
3184 	if (r) {
3185 		DRM_ERROR("kfq enable failed\n");
3186 		kiq_ring->sched.ready = false;
3187 	}
3188 	return r;
3189 }
3190 #endif
3191 
3192 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3193 {
3194 	int r, i;
3195 	struct amdgpu_ring *ring;
3196 
3197 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3198 		ring = &adev->gfx.gfx_ring[i];
3199 
3200 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3201 		if (unlikely(r != 0))
3202 			goto done;
3203 
3204 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3205 		if (!r) {
3206 			r = gfx_v10_0_gfx_init_queue(ring);
3207 			amdgpu_bo_kunmap(ring->mqd_obj);
3208 			ring->mqd_ptr = NULL;
3209 		}
3210 		amdgpu_bo_unreserve(ring->mqd_obj);
3211 		if (r)
3212 			goto done;
3213 	}
3214 #ifndef BRING_UP_DEBUG
3215 	r = gfx_v10_0_kiq_enable_kgq(adev);
3216 	if (r)
3217 		goto done;
3218 #endif
3219 	r = gfx_v10_0_cp_gfx_start(adev);
3220 	if (r)
3221 		goto done;
3222 
3223 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3224 		ring = &adev->gfx.gfx_ring[i];
3225 		ring->sched.ready = true;
3226 	}
3227 done:
3228 	return r;
3229 }
3230 
3231 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
3232 {
3233 	struct amdgpu_device *adev = ring->adev;
3234 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
3235 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3236 	uint32_t tmp;
3237 
3238 	mqd->header = 0xC0310800;
3239 	mqd->compute_pipelinestat_enable = 0x00000001;
3240 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3241 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3242 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3243 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3244 	mqd->compute_misc_reserved = 0x00000003;
3245 
3246 	eop_base_addr = ring->eop_gpu_addr >> 8;
3247 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3248 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3249 
3250 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3251 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3252 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3253 			(order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
3254 
3255 	mqd->cp_hqd_eop_control = tmp;
3256 
3257 	/* enable doorbell? */
3258 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3259 
3260 	if (ring->use_doorbell) {
3261 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3262 				    DOORBELL_OFFSET, ring->doorbell_index);
3263 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3264 				    DOORBELL_EN, 1);
3265 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3266 				    DOORBELL_SOURCE, 0);
3267 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3268 				    DOORBELL_HIT, 0);
3269 	} else {
3270 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3271 				    DOORBELL_EN, 0);
3272 	}
3273 
3274 	mqd->cp_hqd_pq_doorbell_control = tmp;
3275 
3276 	/* disable the queue if it's active */
3277 	ring->wptr = 0;
3278 	mqd->cp_hqd_dequeue_request = 0;
3279 	mqd->cp_hqd_pq_rptr = 0;
3280 	mqd->cp_hqd_pq_wptr_lo = 0;
3281 	mqd->cp_hqd_pq_wptr_hi = 0;
3282 
3283 	/* set the pointer to the MQD */
3284 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3285 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3286 
3287 	/* set MQD vmid to 0 */
3288 	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3289 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3290 	mqd->cp_mqd_control = tmp;
3291 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3293 	hqd_gpu_addr = ring->gpu_addr >> 8;
3294 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3295 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3296 
3297 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3298 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3299 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3300 			    (order_base_2(ring->ring_size / 4) - 1));
3301 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3302 			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3303 #ifdef __BIG_ENDIAN
3304 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3305 #endif
3306 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3307 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3308 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3309 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3310 	mqd->cp_hqd_pq_control = tmp;
3311 
3312 	/* set the wb address whether it's enabled or not */
3313 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3314 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3315 	mqd->cp_hqd_pq_rptr_report_addr_hi =
3316 		upper_32_bits(wb_gpu_addr) & 0xffff;
3317 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN=1 */
3319 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3320 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3321 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3322 
3323 	tmp = 0;
3324 	/* enable the doorbell if requested */
3325 	if (ring->use_doorbell) {
3326 		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3327 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3328 				DOORBELL_OFFSET, ring->doorbell_index);
3329 
3330 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3331 				    DOORBELL_EN, 1);
3332 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3333 				    DOORBELL_SOURCE, 0);
3334 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3335 				    DOORBELL_HIT, 0);
3336 	}
3337 
3338 	mqd->cp_hqd_pq_doorbell_control = tmp;
3339 
3340 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3341 	ring->wptr = 0;
3342 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3343 
3344 	/* set the vmid for the queue */
3345 	mqd->cp_hqd_vmid = 0;
3346 
3347 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3348 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3349 	mqd->cp_hqd_persistent_state = tmp;
3350 
3351 	/* set MIN_IB_AVAIL_SIZE */
3352 	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3353 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3354 	mqd->cp_hqd_ib_control = tmp;
3355 
3356 	/* activate the queue */
3357 	mqd->cp_hqd_active = 1;
3358 
3359 	return 0;
3360 }
3361 
3362 static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
3363 {
3364 	struct amdgpu_device *adev = ring->adev;
3365 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
3366 	int j;
3367 
3368 	/* disable wptr polling */
3369 	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3370 
3371 	/* write the EOP addr */
3372 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3373 	       mqd->cp_hqd_eop_base_addr_lo);
3374 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3375 	       mqd->cp_hqd_eop_base_addr_hi);
3376 
3377 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3378 	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3379 	       mqd->cp_hqd_eop_control);
3380 
3381 	/* enable doorbell? */
3382 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3383 	       mqd->cp_hqd_pq_doorbell_control);
3384 
3385 	/* disable the queue if it's active */
3386 	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3387 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3388 		for (j = 0; j < adev->usec_timeout; j++) {
3389 			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3390 				break;
3391 			udelay(1);
3392 		}
3393 		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3394 		       mqd->cp_hqd_dequeue_request);
3395 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3396 		       mqd->cp_hqd_pq_rptr);
3397 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3398 		       mqd->cp_hqd_pq_wptr_lo);
3399 		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3400 		       mqd->cp_hqd_pq_wptr_hi);
3401 	}
3402 
3403 	/* set the pointer to the MQD */
3404 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3405 	       mqd->cp_mqd_base_addr_lo);
3406 	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3407 	       mqd->cp_mqd_base_addr_hi);
3408 
3409 	/* set MQD vmid to 0 */
3410 	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3411 	       mqd->cp_mqd_control);
3412 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3414 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3415 	       mqd->cp_hqd_pq_base_lo);
3416 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3417 	       mqd->cp_hqd_pq_base_hi);
3418 
3419 	/* set up the HQD, this is similar to CP_RB0_CNTL */
3420 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3421 	       mqd->cp_hqd_pq_control);
3422 
3423 	/* set the wb address whether it's enabled or not */
3424 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3425 		mqd->cp_hqd_pq_rptr_report_addr_lo);
3426 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3427 		mqd->cp_hqd_pq_rptr_report_addr_hi);
3428 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN=1 */
3430 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3431 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
3432 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3433 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
3434 
3435 	/* enable the doorbell if requested */
3436 	if (ring->use_doorbell) {
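		/* doorbell_index values are in 64-bit doorbell units: *2
		 * converts to dword units and <<2 to a byte offset
		 * (assumption based on the doorbell layout)
		 */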
3437 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3438 			(adev->doorbell_index.kiq * 2) << 2);
3439 		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3440 			(adev->doorbell_index.userqueue_end * 2) << 2);
3441 	}
3442 
3443 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3444 	       mqd->cp_hqd_pq_doorbell_control);
3445 
3446 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3447 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3448 	       mqd->cp_hqd_pq_wptr_lo);
3449 	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3450 	       mqd->cp_hqd_pq_wptr_hi);
3451 
3452 	/* set the vmid for the queue */
3453 	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3454 
3455 	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3456 	       mqd->cp_hqd_persistent_state);
3457 
3458 	/* activate the queue */
3459 	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3460 	       mqd->cp_hqd_active);
3461 
3462 	if (ring->use_doorbell)
3463 		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3464 
3465 	return 0;
3466 }
3467 
3468 static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3469 {
3470 	struct amdgpu_device *adev = ring->adev;
3471 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
3472 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3473 
3474 	gfx_v10_0_kiq_setting(ring);
3475 
3476 	if (adev->in_gpu_reset) { /* for GPU_RESET case */
3477 		/* reset MQD to a clean status */
3478 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3479 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3480 
3481 		/* reset ring buffer */
3482 		ring->wptr = 0;
3483 		amdgpu_ring_clear_ring(ring);
3484 
3485 		mutex_lock(&adev->srbm_mutex);
3486 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3487 		gfx_v10_0_kiq_init_register(ring);
3488 		nv_grbm_select(adev, 0, 0, 0, 0);
3489 		mutex_unlock(&adev->srbm_mutex);
3490 	} else {
3491 		memset((void *)mqd, 0, sizeof(*mqd));
3492 		mutex_lock(&adev->srbm_mutex);
3493 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3494 		gfx_v10_0_compute_mqd_init(ring);
3495 		gfx_v10_0_kiq_init_register(ring);
3496 		nv_grbm_select(adev, 0, 0, 0, 0);
3497 		mutex_unlock(&adev->srbm_mutex);
3498 
3499 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3500 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3501 	}
3502 
3503 	return 0;
3504 }
3505 
3506 static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
3507 {
3508 	struct amdgpu_device *adev = ring->adev;
3509 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
3510 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
3511 
3512 	if (!adev->in_gpu_reset && !adev->in_suspend) {
3513 		memset((void *)mqd, 0, sizeof(*mqd));
3514 		mutex_lock(&adev->srbm_mutex);
3515 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3516 		gfx_v10_0_compute_mqd_init(ring);
3517 		nv_grbm_select(adev, 0, 0, 0, 0);
3518 		mutex_unlock(&adev->srbm_mutex);
3519 
3520 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3521 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3522 	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3523 		/* reset MQD to a clean status */
3524 		if (adev->gfx.mec.mqd_backup[mqd_idx])
3525 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
3526 
3527 		/* reset ring buffer */
3528 		ring->wptr = 0;
3529 		amdgpu_ring_clear_ring(ring);
3530 	} else {
3531 		amdgpu_ring_clear_ring(ring);
3532 	}
3533 
3534 	return 0;
3535 }
3536 
3537 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
3538 {
3539 	struct amdgpu_ring *ring;
3540 	int r;
3541 
3542 	ring = &adev->gfx.kiq.ring;
3543 
3544 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
3545 	if (unlikely(r != 0))
3546 		return r;
3547 
3548 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3549 	if (unlikely(r != 0))
3550 		return r;
3551 
3552 	gfx_v10_0_kiq_init_queue(ring);
3553 	amdgpu_bo_kunmap(ring->mqd_obj);
3554 	ring->mqd_ptr = NULL;
3555 	amdgpu_bo_unreserve(ring->mqd_obj);
3556 	ring->sched.ready = true;
3557 	return 0;
3558 }
3559 
3560 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
3561 {
3562 	struct amdgpu_ring *ring = NULL;
3563 	int r = 0, i;
3564 
3565 	gfx_v10_0_cp_compute_enable(adev, true);
3566 
3567 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3568 		ring = &adev->gfx.compute_ring[i];
3569 
3570 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
3571 		if (unlikely(r != 0))
3572 			goto done;
3573 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3574 		if (!r) {
3575 			r = gfx_v10_0_kcq_init_queue(ring);
3576 			amdgpu_bo_kunmap(ring->mqd_obj);
3577 			ring->mqd_ptr = NULL;
3578 		}
3579 		amdgpu_bo_unreserve(ring->mqd_obj);
3580 		if (r)
3581 			goto done;
3582 	}
3583 
3584 	r = amdgpu_gfx_enable_kcq(adev);
3585 done:
3586 	return r;
3587 }
3588 
3589 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev)
3590 {
3591 	int r, i;
3592 	struct amdgpu_ring *ring;
3593 
3594 	if (!(adev->flags & AMD_IS_APU))
3595 		gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3596 
3597 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3598 		/* legacy firmware loading */
3599 		r = gfx_v10_0_cp_gfx_load_microcode(adev);
3600 		if (r)
3601 			return r;
3602 
3603 		r = gfx_v10_0_cp_compute_load_microcode(adev);
3604 		if (r)
3605 			return r;
3606 	}
3607 
3608 	r = gfx_v10_0_kiq_resume(adev);
3609 	if (r)
3610 		return r;
3611 
3612 	r = gfx_v10_0_kcq_resume(adev);
3613 	if (r)
3614 		return r;
3615 
3616 	if (!amdgpu_async_gfx_ring) {
3617 		r = gfx_v10_0_cp_gfx_resume(adev);
3618 		if (r)
3619 			return r;
3620 	} else {
3621 		r = gfx_v10_0_cp_async_gfx_ring_resume(adev);
3622 		if (r)
3623 			return r;
3624 	}
3625 
3626 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3627 		ring = &adev->gfx.gfx_ring[i];
3628 		DRM_INFO("gfx %d ring me %d pipe %d q %d\n",
3629 			 i, ring->me, ring->pipe, ring->queue);
3630 		r = amdgpu_ring_test_ring(ring);
3631 		if (r) {
3632 			ring->sched.ready = false;
3633 			return r;
3634 		}
3635 	}
3636 
3637 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3638 		ring = &adev->gfx.compute_ring[i];
3639 		ring->sched.ready = true;
3640 		DRM_INFO("compute ring %d mec %d pipe %d q %d\n",
3641 			 i, ring->me, ring->pipe, ring->queue);
3642 		r = amdgpu_ring_test_ring(ring);
3643 		if (r)
3644 			ring->sched.ready = false;
3645 	}
3646 
3647 	return 0;
3648 }
3649 
3650 static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable)
3651 {
3652 	gfx_v10_0_cp_gfx_enable(adev, enable);
3653 	gfx_v10_0_cp_compute_enable(adev, enable);
3654 }
3655 
3656 static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
3657 {
3658 	uint32_t data, pattern = 0xDEADBEEF;
3659 
3660 	/* check if mmVGT_ESGS_RING_SIZE_UMD
3661 	 * has been remapped to mmVGT_ESGS_RING_SIZE */
3662 	data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
3663 
3664 	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
3665 
3666 	WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
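	/* if the CAM remapping is already in place, the write to the _UMD
	 * alias above lands in the real register and reads back below
	 */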
3667 
3668 	if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
3669 		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
3670 		return true;
3671 	} else {
3672 		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
3673 		return false;
3674 	}
3675 }
3676 
3677 static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
3678 {
3679 	uint32_t data;
3680 
	/* initialize cam_index to 0
	 * index will auto-inc after each data writing */
3683 	WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
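	/* each CAM entry packs the UMD alias offset (CAM_ADDR) and the real
	 * register offset (CAM_REMAPADDR); writing GRBM_CAM_DATA commits the
	 * entry and advances the index
	 */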
3684 
3685 	/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
3686 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
3687 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3688 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
3689 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3690 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3691 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3692 
3693 	/* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
3694 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
3695 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3696 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
3697 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3698 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3699 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3700 
3701 	/* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
3702 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
3703 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3704 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
3705 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3706 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3707 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3708 
3709 	/* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
3710 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
3711 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3712 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
3713 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3714 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3715 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3716 
3717 	/* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
3718 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
3719 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3720 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
3721 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3722 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3723 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3724 
3725 	/* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
3726 	data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
3727 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3728 	       (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
3729 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3730 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3731 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3732 
3733 	/* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
3734 	data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
3735 		GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
3736 	       (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
3737 		GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
3738 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
3739 	WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
3740 }
3741 
3742 static int gfx_v10_0_hw_init(void *handle)
3743 {
3744 	int r;
3745 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3746 
3747 	r = gfx_v10_0_csb_vram_pin(adev);
3748 	if (r)
3749 		return r;
3750 
3751 	if (!amdgpu_emu_mode)
3752 		gfx_v10_0_init_golden_registers(adev);
3753 
3754 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
3755 		/**
3756 		 * For gfx 10, rlc firmware loading relies on smu firmware is
3757 		 * loaded firstly, so in direct type, it has to load smc ucode
3758 		 * here before rlc.
3759 		 */
3760 		r = smu_load_microcode(&adev->smu);
3761 		if (r)
3762 			return r;
3763 
3764 		r = smu_check_fw_status(&adev->smu);
3765 		if (r) {
3766 			pr_err("SMC firmware status is not correct\n");
3767 			return r;
3768 		}
3769 	}
3770 
3771 	/* if GRBM CAM not remapped, set up the remapping */
3772 	if (!gfx_v10_0_check_grbm_cam_remapping(adev))
3773 		gfx_v10_0_setup_grbm_cam_remapping(adev);
3774 
3775 	gfx_v10_0_constants_init(adev);
3776 
3777 	r = gfx_v10_0_rlc_resume(adev);
3778 	if (r)
3779 		return r;
3780 
3781 	/*
3782 	 * init golden registers and rlc resume may override some registers,
3783 	 * reconfig them here
3784 	 */
3785 	gfx_v10_0_tcp_harvest(adev);
3786 
3787 	r = gfx_v10_0_cp_resume(adev);
3788 	if (r)
3789 		return r;
3790 
3791 	return r;
3792 }
3793 
3794 #ifndef BRING_UP_DEBUG
3795 static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
3796 {
3797 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
3798 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3799 	int i;
3800 
3801 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3802 		return -EINVAL;
3803 
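	/* reserve enough ring space for one UNMAP_QUEUES packet per gfx ring */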
3804 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
3805 					adev->gfx.num_gfx_rings))
3806 		return -ENOMEM;
3807 
3808 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3809 		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
3810 					   PREEMPT_QUEUES, 0, 0);
3811 
3812 	return amdgpu_ring_test_ring(kiq_ring);
3813 }
3814 #endif
3815 
3816 static int gfx_v10_0_hw_fini(void *handle)
3817 {
3818 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3819 	int r;
3820 
3821 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3822 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3823 #ifndef BRING_UP_DEBUG
3824 	if (amdgpu_async_gfx_ring) {
3825 		r = gfx_v10_0_kiq_disable_kgq(adev);
3826 		if (r)
3827 			DRM_ERROR("KGQ disable failed\n");
3828 	}
3829 #endif
3830 	if (amdgpu_gfx_disable_kcq(adev))
3831 		DRM_ERROR("KCQ disable failed\n");
3832 	if (amdgpu_sriov_vf(adev)) {
3833 		pr_debug("For SRIOV client, shouldn't do anything.\n");
3834 		return 0;
3835 	}
3836 	gfx_v10_0_cp_enable(adev, false);
3837 	gfx_v10_0_enable_gui_idle_interrupt(adev, false);
3838 	gfx_v10_0_csb_vram_unpin(adev);
3839 
3840 	return 0;
3841 }
3842 
3843 static int gfx_v10_0_suspend(void *handle)
3844 {
3845 	return gfx_v10_0_hw_fini(handle);
3846 }
3847 
3848 static int gfx_v10_0_resume(void *handle)
3849 {
3850 	return gfx_v10_0_hw_init(handle);
3851 }
3852 
3853 static bool gfx_v10_0_is_idle(void *handle)
3854 {
3855 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3856 
3857 	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3858 				GRBM_STATUS, GUI_ACTIVE))
3859 		return false;
3860 	else
3861 		return true;
3862 }
3863 
3864 static int gfx_v10_0_wait_for_idle(void *handle)
3865 {
3866 	unsigned i;
3867 	u32 tmp;
3868 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3869 
3870 	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
3872 		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3873 			GRBM_STATUS__GUI_ACTIVE_MASK;
3874 
3875 		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
3876 			return 0;
3877 		udelay(1);
3878 	}
3879 	return -ETIMEDOUT;
3880 }
3881 
3882 static int gfx_v10_0_soft_reset(void *handle)
3883 {
3884 	u32 grbm_soft_reset = 0;
3885 	u32 tmp;
3886 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3887 
3888 	/* GRBM_STATUS */
3889 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3890 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3891 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3892 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
3893 		   GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
3894 		   GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
3895 		   | GRBM_STATUS__BCI_BUSY_MASK)) {
3896 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3897 						GRBM_SOFT_RESET, SOFT_RESET_CP,
3898 						1);
3899 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3900 						GRBM_SOFT_RESET, SOFT_RESET_GFX,
3901 						1);
3902 	}
3903 
3904 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3905 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3906 						GRBM_SOFT_RESET, SOFT_RESET_CP,
3907 						1);
3908 	}
3909 
3910 	/* GRBM_STATUS2 */
3911 	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3912 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3913 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3914 						GRBM_SOFT_RESET, SOFT_RESET_RLC,
3915 						1);
3916 
3917 	if (grbm_soft_reset) {
3918 		/* stop the rlc */
3919 		gfx_v10_0_rlc_stop(adev);
3920 
3921 		/* Disable GFX parsing/prefetching */
3922 		gfx_v10_0_cp_gfx_enable(adev, false);
3923 
3924 		/* Disable MEC parsing/prefetching */
3925 		gfx_v10_0_cp_compute_enable(adev, false);
3926 
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3940 
3941 		/* Wait a little for things to settle down */
3942 		udelay(50);
3943 	}
3944 	return 0;
3945 }
3946 
3947 static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3948 {
3949 	uint64_t clock;
3950 
3951 	mutex_lock(&adev->gfx.gpu_clock_mutex);
3952 	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3953 	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3954 		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3955 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
3956 	return clock;
3957 }
3958 
3959 static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3960 					   uint32_t vmid,
3961 					   uint32_t gds_base, uint32_t gds_size,
3962 					   uint32_t gws_base, uint32_t gws_size,
3963 					   uint32_t oa_base, uint32_t oa_size)
3964 {
3965 	struct amdgpu_device *adev = ring->adev;
3966 
3967 	/* GDS Base */
3968 	gfx_v10_0_write_data_to_reg(ring, 0, false,
3969 				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3970 				    gds_base);
3971 
3972 	/* GDS Size */
3973 	gfx_v10_0_write_data_to_reg(ring, 0, false,
3974 				    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3975 				    gds_size);
3976 
3977 	/* GWS */
3978 	gfx_v10_0_write_data_to_reg(ring, 0, false,
3979 				    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3980 				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3981 
3982 	/* OA */
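	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of
	 * oa_size contiguous bits starting at bit oa_base
	 */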
3983 	gfx_v10_0_write_data_to_reg(ring, 0, false,
3984 				    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3985 				    (1 << (oa_size + oa_base)) - (1 << oa_base));
3986 }
3987 
3988 static int gfx_v10_0_early_init(void *handle)
3989 {
3990 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3991 
3992 	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
3993 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3994 
3995 	gfx_v10_0_set_kiq_pm4_funcs(adev);
3996 	gfx_v10_0_set_ring_funcs(adev);
3997 	gfx_v10_0_set_irq_funcs(adev);
3998 	gfx_v10_0_set_gds_init(adev);
3999 	gfx_v10_0_set_rlc_funcs(adev);
4000 
4001 	return 0;
4002 }
4003 
4004 static int gfx_v10_0_late_init(void *handle)
4005 {
4006 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4007 	int r;
4008 
4009 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4010 	if (r)
4011 		return r;
4012 
4013 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4014 	if (r)
4015 		return r;
4016 
4017 	return 0;
4018 }
4019 
4020 static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
4021 {
4022 	uint32_t rlc_cntl;
4023 
	/* check whether the RLC (F32 core) is enabled */
	rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	return REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32) != 0;
4027 }
4028 
4029 static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
4030 {
4031 	uint32_t data;
4032 	unsigned i;
4033 
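	/* request safe mode: CMD together with MESSAGE=1 asks the RLC to
	 * enter safe mode; the RLC clears the CMD bit once it is in, which
	 * is what the poll below waits for
	 */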
4034 	data = RLC_SAFE_MODE__CMD_MASK;
4035 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4036 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4037 
4038 	/* wait for RLC_SAFE_MODE */
4039 	for (i = 0; i < adev->usec_timeout; i++) {
4040 		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4041 			break;
4042 		udelay(1);
4043 	}
4044 }
4045 
4046 static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
4047 {
4048 	uint32_t data;
4049 
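	/* CMD with MESSAGE=0 asks the RLC to leave safe mode */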
4050 	data = RLC_SAFE_MODE__CMD_MASK;
4051 	WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4052 }
4053 
4054 static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4055 						      bool enable)
4056 {
4057 	uint32_t data, def;
4058 
4059 	/* It is disabled by HW by default */
4060 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4061 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
4062 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4063 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4064 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4065 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4066 
4067 		/* only for Vega10 & Raven1 */
4068 		data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4069 
4070 		if (def != data)
4071 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4072 
4073 		/* MGLS is a global flag to control all MGLS in GFX */
4074 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4075 			/* 2 - RLC memory Light sleep */
4076 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4077 				def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4078 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4079 				if (def != data)
4080 					WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4081 			}
4082 			/* 3 - CP memory Light sleep */
4083 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4084 				def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4085 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4086 				if (def != data)
4087 					WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4088 			}
4089 		}
4090 	} else {
4091 		/* 1 - MGCG_OVERRIDE */
4092 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4093 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4094 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4095 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4096 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4097 		if (def != data)
4098 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4099 
4100 		/* 2 - disable MGLS in RLC */
4101 		data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4102 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4103 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4104 			WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4105 		}
4106 
4107 		/* 3 - disable MGLS in CP */
4108 		data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4109 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4110 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4111 			WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4112 		}
4113 	}
4114 }
4115 
4116 static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev,
4117 					   bool enable)
4118 {
4119 	uint32_t data, def;
4120 
4121 	/* Enable 3D CGCG/CGLS */
4122 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4123 		/* write cmd to clear cgcg/cgls ov */
4124 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4125 		/* unset CGCG override */
4126 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4127 		/* update CGCG and CGLS override bits */
4128 		if (def != data)
4129 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4130 		/* enable 3Dcgcg FSM(0x0000363f) */
4131 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4132 		data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4133 			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4134 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4135 			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4136 				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4137 		if (def != data)
4138 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4139 
4140 		/* set IDLE_POLL_COUNT(0x00900100) */
4141 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4142 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4143 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4144 		if (def != data)
4145 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4146 	} else {
4147 		/* Disable CGCG/CGLS */
4148 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4149 		/* disable cgcg, cgls should be disabled */
4150 		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4151 			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4152 		/* disable cgcg and cgls in FSM */
4153 		if (def != data)
4154 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4155 	}
4156 }
4157 
4158 static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4159 						      bool enable)
4160 {
4161 	uint32_t def, data;
4162 
4163 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4164 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4165 		/* unset CGCG override */
4166 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4167 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4168 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4169 		else
4170 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4171 		/* update CGCG and CGLS override bits */
4172 		if (def != data)
4173 			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4174 
4175 		/* enable cgcg FSM(0x0000363F) */
4176 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4177 		data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4178 			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4179 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4180 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4181 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4182 		if (def != data)
4183 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4184 
4185 		/* set IDLE_POLL_COUNT(0x00900100) */
4186 		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4187 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4188 			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4189 		if (def != data)
4190 			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4191 	} else {
4192 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4193 		/* reset CGCG/CGLS bits */
4194 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4195 		/* disable cgcg and cgls in FSM */
4196 		if (def != data)
4197 			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4198 	}
4199 }
4200 
4201 static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4202 					    bool enable)
4203 {
4204 	amdgpu_gfx_rlc_enter_safe_mode(adev);
4205 
4206 	if (enable) {
4207 		/* CGCG/CGLS should be enabled after MGCG/MGLS
4208 		 * ===  MGCG + MGLS ===
4209 		 */
4210 		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4211 		/* ===  CGCG /CGLS for GFX 3D Only === */
4212 		gfx_v10_0_update_3d_clock_gating(adev, enable);
4213 		/* ===  CGCG + CGLS === */
4214 		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4215 	} else {
4216 		/* CGCG/CGLS should be disabled before MGCG/MGLS
4217 		 * ===  CGCG + CGLS ===
4218 		 */
4219 		gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
4220 		/* ===  CGCG /CGLS for GFX 3D Only === */
4221 		gfx_v10_0_update_3d_clock_gating(adev, enable);
4222 		/* ===  MGCG + MGLS === */
4223 		gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
4224 	}
4225 
4226 	if (adev->cg_flags &
4227 	    (AMD_CG_SUPPORT_GFX_MGCG |
4228 	     AMD_CG_SUPPORT_GFX_CGLS |
4229 	     AMD_CG_SUPPORT_GFX_CGCG |
4230 	     AMD_CG_SUPPORT_GFX_CGLS |
4231 	     AMD_CG_SUPPORT_GFX_3D_CGCG |
4232 	     AMD_CG_SUPPORT_GFX_3D_CGLS))
4233 		gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
4234 
4235 	amdgpu_gfx_rlc_exit_safe_mode(adev);
4236 
4237 	return 0;
4238 }
4239 
4240 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
4241 	.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
4242 	.set_safe_mode = gfx_v10_0_set_safe_mode,
4243 	.unset_safe_mode = gfx_v10_0_unset_safe_mode,
4244 	.init = gfx_v10_0_rlc_init,
4245 	.get_csb_size = gfx_v10_0_get_csb_size,
4246 	.get_csb_buffer = gfx_v10_0_get_csb_buffer,
4247 	.resume = gfx_v10_0_rlc_resume,
4248 	.stop = gfx_v10_0_rlc_stop,
4249 	.reset = gfx_v10_0_rlc_reset,
4250 	.start = gfx_v10_0_rlc_start
4251 };
4252 
4253 static int gfx_v10_0_set_powergating_state(void *handle,
4254 					  enum amd_powergating_state state)
4255 {
4256 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

4258 	switch (adev->asic_type) {
4259 	case CHIP_NAVI10:
4260 	case CHIP_NAVI14:
4261 		if (!enable) {
4262 			amdgpu_gfx_off_ctrl(adev, false);
4263 			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
		} else {
			amdgpu_gfx_off_ctrl(adev, true);
		}
4266 		break;
4267 	default:
4268 		break;
4269 	}
4270 	return 0;
4271 }
4272 
4273 static int gfx_v10_0_set_clockgating_state(void *handle,
4274 					  enum amd_clockgating_state state)
4275 {
4276 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4277 
4278 	switch (adev->asic_type) {
4279 	case CHIP_NAVI10:
4280 	case CHIP_NAVI14:
4281 	case CHIP_NAVI12:
		gfx_v10_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
4284 		break;
4285 	default:
4286 		break;
4287 	}
4288 	return 0;
4289 }
4290 
4291 static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
4292 {
4293 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4294 	int data;
4295 
4296 	/* AMD_CG_SUPPORT_GFX_MGCG */
4297 	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4298 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4299 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
4300 
4301 	/* AMD_CG_SUPPORT_GFX_CGCG */
4302 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4303 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4304 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
4305 
4306 	/* AMD_CG_SUPPORT_GFX_CGLS */
4307 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4308 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
4309 
4310 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
4311 	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4312 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4313 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4314 
4315 	/* AMD_CG_SUPPORT_GFX_CP_LS */
4316 	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4317 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4318 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4319 
4320 	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
4321 	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4322 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4323 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4324 
4325 	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
4326 	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4327 		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4328 }
4329 
4330 static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4331 {
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 uses a 32-bit rptr */
4333 }
4334 
4335 static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4336 {
4337 	struct amdgpu_device *adev = ring->adev;
4338 	u64 wptr;
4339 
4340 	/* XXX check if swapping is necessary on BE */
4341 	if (ring->use_doorbell) {
4342 		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4343 	} else {
4344 		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4345 		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4346 	}
4347 
4348 	return wptr;
4349 }
4350 
4351 static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4352 {
4353 	struct amdgpu_device *adev = ring->adev;
4354 
4355 	if (ring->use_doorbell) {
4356 		/* XXX check if swapping is necessary on BE */
4357 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4358 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4359 	} else {
4360 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4361 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4362 	}
4363 }
4364 
4365 static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4366 {
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware uses a 32-bit rptr */
4368 }
4369 
4370 static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4371 {
4372 	u64 wptr;
4373 
4374 	/* XXX check if swapping is necessary on BE */
4375 	if (ring->use_doorbell)
4376 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4377 	else
4378 		BUG();
4379 	return wptr;
4380 }
4381 
4382 static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4383 {
4384 	struct amdgpu_device *adev = ring->adev;
4385 
4386 	/* XXX check if swapping is necessary on BE */
4387 	if (ring->use_doorbell) {
4388 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4389 		WDOORBELL64(ring->doorbell_index, ring->wptr);
4390 	} else {
4391 		BUG(); /* only DOORBELL method supported on gfx10 now */
4392 	}
4393 }
4394 
4395 static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4396 {
4397 	struct amdgpu_device *adev = ring->adev;
4398 	u32 ref_and_mask, reg_mem_engine;
4399 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4400 
4401 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
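		/* each compute ME exposes per-pipe HDP-flush done bits;
		 * shift the ME's base ref/mask by the pipe index
		 */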
4402 		switch (ring->me) {
4403 		case 1:
4404 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4405 			break;
4406 		case 2:
4407 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4408 			break;
4409 		default:
4410 			return;
4411 		}
4412 		reg_mem_engine = 0;
4413 	} else {
4414 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4415 		reg_mem_engine = 1; /* pfp */
4416 	}
4417 
4418 	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4419 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4420 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4421 			       ref_and_mask, ref_and_mask, 0x20);
4422 }
4423 
4424 static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4425 				       struct amdgpu_job *job,
4426 				       struct amdgpu_ib *ib,
4427 				       uint32_t flags)
4428 {
4429 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4430 	u32 header, control = 0;
4431 
4432 	if (ib->flags & AMDGPU_IB_FLAG_CE)
4433 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2);
4434 	else
4435 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4436 
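	/* control packs the IB size in dwords (low bits) and the vmid in
	 * bits [27:24]
	 */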
4437 	control |= ib->length_dw | (vmid << 24);
4438 
4439 	if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4440 		control |= INDIRECT_BUFFER_PRE_ENB(1);
4441 
4442 		if (flags & AMDGPU_IB_PREEMPTED)
4443 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
4444 
4445 		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v10_0_ring_emit_de_meta(ring,
				    !!(flags & AMDGPU_IB_PREEMPTED));
4448 	}
4449 
4450 	amdgpu_ring_write(ring, header);
4451 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4452 	amdgpu_ring_write(ring,
4453 #ifdef __BIG_ENDIAN
4454 		(2 << 0) |
4455 #endif
4456 		lower_32_bits(ib->gpu_addr));
4457 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4458 	amdgpu_ring_write(ring, control);
4459 }
4460 
4461 static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4462 					   struct amdgpu_job *job,
4463 					   struct amdgpu_ib *ib,
4464 					   uint32_t flags)
4465 {
4466 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4467 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4468 
	/* Currently, there is a high likelihood of getting a wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
4472 	 * randomly when at least 5 compute pipes use GDS ordered append.
4473 	 * The wave IDs generated by ME are also wrong after suspend/resume.
4474 	 * Those are probably bugs somewhere else in the kernel driver.
4475 	 *
4476 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4477 	 * GDS to 0 for this ring (me/pipe).
4478 	 */
4479 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4480 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4481 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4482 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4483 	}
4484 
4485 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4486 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4487 	amdgpu_ring_write(ring,
4488 #ifdef __BIG_ENDIAN
4489 				(2 << 0) |
4490 #endif
4491 				lower_32_bits(ib->gpu_addr));
4492 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4493 	amdgpu_ring_write(ring, control);
4494 }
4495 
4496 static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4497 				     u64 seq, unsigned flags)
4498 {
4499 	struct amdgpu_device *adev = ring->adev;
4500 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4501 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4502 
	/* Interrupts don't work properly on the GFX10.1 model yet; use the fallback instead */
4504 	if (adev->pdev->device == 0x50)
4505 		int_sel = false;
4506 
4507 	/* RELEASE_MEM - flush caches, send int */
4508 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4509 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
4510 				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
4511 				 PACKET3_RELEASE_MEM_GCR_GLM_INV | /* must be set with GLM_WB */
4512 				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
4513 				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
4514 				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4515 				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
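	/* DATA_SEL 2 sends the full 64-bit seq, 1 only the low 32 bits;
	 * INT_SEL 2 raises an interrupt once the write is confirmed
	 */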
4516 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
4517 				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
4518 
4519 	/*
4520 	 * the address should be Qword aligned if 64bit write, Dword
4521 	 * aligned if only send 32bit data low (discard data high)
4522 	 */
4523 	if (write64bit)
4524 		BUG_ON(addr & 0x7);
4525 	else
4526 		BUG_ON(addr & 0x3);
4527 	amdgpu_ring_write(ring, lower_32_bits(addr));
4528 	amdgpu_ring_write(ring, upper_32_bits(addr));
4529 	amdgpu_ring_write(ring, lower_32_bits(seq));
4530 	amdgpu_ring_write(ring, upper_32_bits(seq));
4531 	amdgpu_ring_write(ring, 0);
4532 }
4533 
4534 static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4535 {
4536 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4537 	uint32_t seq = ring->fence_drv.sync_seq;
4538 	uint64_t addr = ring->fence_drv.gpu_addr;
4539 
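	/* wait (WAIT_REG_MEM on memory) until the fence location reaches
	 * sync_seq; gfx rings poll with the PFP, compute rings with the ME
	 */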
4540 	gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
4541 			       upper_32_bits(addr), seq, 0xffffffff, 4);
4542 }
4543 
4544 static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4545 					 unsigned vmid, uint64_t pd_addr)
4546 {
4547 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4548 
4549 	/* compute doesn't have PFP */
4550 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4551 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
4552 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4553 		amdgpu_ring_write(ring, 0x0);
4554 	}
4555 }
4556 
4557 static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4558 					  u64 seq, unsigned int flags)
4559 {
4560 	struct amdgpu_device *adev = ring->adev;
4561 
4562 	/* we only allocate 32bit for each seq wb address */
4563 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4564 
4565 	/* write fence seq to the "addr" */
4566 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4567 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4568 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4569 	amdgpu_ring_write(ring, lower_32_bits(addr));
4570 	amdgpu_ring_write(ring, upper_32_bits(addr));
4571 	amdgpu_ring_write(ring, lower_32_bits(seq));
4572 
4573 	if (flags & AMDGPU_FENCE_FLAG_INT) {
4574 		/* set register to trigger INT */
4575 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4576 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4577 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4578 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4579 		amdgpu_ring_write(ring, 0);
4580 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4581 	}
4582 }
4583 
4584 static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
4585 {
4586 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4587 	amdgpu_ring_write(ring, 0);
4588 }
4589 
4590 static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4591 {
4592 	uint32_t dw2 = 0;
4593 
4594 	if (amdgpu_mcbp)
		gfx_v10_0_ring_emit_ce_meta(ring,
				    !!(flags & AMDGPU_IB_PREEMPTED));
4597 
4598 	gfx_v10_0_ring_emit_tmz(ring, true);
4599 
	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4601 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4602 		/* set load_global_config & load_global_uconfig */
4603 		dw2 |= 0x8001;
4604 		/* set load_cs_sh_regs */
4605 		dw2 |= 0x01000000;
4606 		/* set load_per_context_state & load_gfx_sh_regs for GFX */
4607 		dw2 |= 0x10002;
4608 
		/* set load_ce_ram if a preamble is present */
4610 		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4611 			dw2 |= 0x10000000;
4612 	} else {
		/* still set load_ce_ram if a preamble is presented for the
		 * first time, even though no context switch happens.
		 */
4616 		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4617 			dw2 |= 0x10000000;
4618 	}
4619 
4620 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4621 	amdgpu_ring_write(ring, dw2);
4622 	amdgpu_ring_write(ring, 0);
4623 }
4624 
4625 static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4626 {
4627 	unsigned ret;
4628 
4629 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4630 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4631 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4632 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
4633 	ret = ring->wptr & ring->buf_mask;
4634 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
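	/* ret is the ring offset of the placeholder dword;
	 * gfx_v10_0_ring_emit_patch_cond_exec() later replaces it with the
	 * number of dwords the CP skips when *cond_exe_gpu_addr == 0
	 */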
4635 
4636 	return ret;
4637 }
4638 
4639 static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4640 {
4641 	unsigned cur;
4642 	BUG_ON(offset > ring->buf_mask);
4643 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
4644 
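	/* compute the dword distance from the patch slot to the current
	 * write pointer, handling ring wrap-around
	 */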
4645 	cur = (ring->wptr - 1) & ring->buf_mask;
4646 	if (likely(cur > offset))
4647 		ring->ring[offset] = cur - offset;
4648 	else
4649 		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
4650 }
4651 
4652 static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
4653 {
4654 	int i, r = 0;
4655 	struct amdgpu_device *adev = ring->adev;
4656 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4657 	struct amdgpu_ring *kiq_ring = &kiq->ring;
4658 
4659 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
4660 		return -EINVAL;
4661 
4662 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
4663 		return -ENOMEM;
4664 
4665 	/* assert preemption condition */
4666 	amdgpu_ring_set_preempt_cond_exec(ring, false);
4667 
4668 	/* assert IB preemption, emit the trailing fence */
4669 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
4670 				   ring->trail_fence_gpu_addr,
4671 				   ++ring->trail_seq);
4672 	amdgpu_ring_commit(kiq_ring);
4673 
4674 	/* poll the trailing fence */
4675 	for (i = 0; i < adev->usec_timeout; i++) {
4676 		if (ring->trail_seq ==
4677 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
4678 			break;
4679 		udelay(1);
4680 	}
4681 
4682 	if (i >= adev->usec_timeout) {
4683 		r = -EINVAL;
4684 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
4685 	}
4686 
4687 	/* deassert preemption condition */
4688 	amdgpu_ring_set_preempt_cond_exec(ring, true);
4689 	return r;
4690 }
4691 
4692 static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
4693 {
4694 	struct amdgpu_device *adev = ring->adev;
4695 	struct v10_ce_ib_state ce_payload = {0};
4696 	uint64_t csa_addr;
4697 	int cnt;
4698 
4699 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4700 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4701 
4702 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4703 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4704 				 WRITE_DATA_DST_SEL(8) |
4705 				 WR_CONFIRM) |
4706 				 WRITE_DATA_CACHE_POLICY(0));
4707 	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4708 			      offsetof(struct v10_gfx_meta_data, ce_payload)));
4709 	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4710 			      offsetof(struct v10_gfx_meta_data, ce_payload)));
4711 
4712 	if (resume)
4713 		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4714 					   offsetof(struct v10_gfx_meta_data,
4715 						    ce_payload),
4716 					   sizeof(ce_payload) >> 2);
4717 	else
4718 		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
4719 					   sizeof(ce_payload) >> 2);
4720 }
4721 
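/*
 * Write the DE metadata payload to the CSA, pointing its GDS backup
 * address at a page-aligned region carved out of the top of the CSA.
 */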
4722 static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
4723 {
4724 	struct amdgpu_device *adev = ring->adev;
4725 	struct v10_de_ib_state de_payload = {0};
4726 	uint64_t csa_addr, gds_addr;
4727 	int cnt;
4728 
4729 	csa_addr = amdgpu_csa_vaddr(ring->adev);
4730 	gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size,
4731 			 PAGE_SIZE);
4732 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4733 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4734 
4735 	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4736 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4737 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4738 				 WRITE_DATA_DST_SEL(8) |
4739 				 WR_CONFIRM) |
4740 				 WRITE_DATA_CACHE_POLICY(0));
4741 	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
4742 			      offsetof(struct v10_gfx_meta_data, de_payload)));
4743 	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
4744 			      offsetof(struct v10_gfx_meta_data, de_payload)));
4745 
4746 	if (resume)
4747 		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
4748 					   offsetof(struct v10_gfx_meta_data,
4749 						    de_payload),
4750 					   sizeof(de_payload) >> 2);
4751 	else
4752 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
4753 					   sizeof(de_payload) >> 2);
4754 }
4755 
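/*
 * Bracket a (potentially TMZ-protected) frame: FRAME_CMD(0) marks the
 * frame begin, FRAME_CMD(1) the frame end.
 */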
4756 static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4757 {
4758 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_begin(0) or frame_end(1) */
4760 }
4761 
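/*
 * Read a register through the ring: COPY_DATA copies the register value
 * into the writeback area at reg_val_offs, where the driver can pick it
 * up. Used for KIQ-assisted register reads (e.g. under SR-IOV).
 */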
4762 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4763 {
4764 	struct amdgpu_device *adev = ring->adev;
4765 
4766 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
4768 				(5 << 8) |	/* dst: memory */
4769 				(1 << 20));	/* write confirm */
4770 	amdgpu_ring_write(ring, reg);
4771 	amdgpu_ring_write(ring, 0);
4772 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4773 				adev->virt.reg_val_offs * 4));
4774 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4775 				adev->virt.reg_val_offs * 4));
4776 }
4777 
4778 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4779 				   uint32_t val)
4780 {
4781 	uint32_t cmd = 0;
4782 
4783 	switch (ring->funcs->type) {
4784 	case AMDGPU_RING_TYPE_GFX:
4785 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4786 		break;
4787 	case AMDGPU_RING_TYPE_KIQ:
4788 		cmd = (1 << 16); /* no inc addr */
4789 		break;
4790 	default:
4791 		cmd = WR_CONFIRM;
4792 		break;
4793 	}
4794 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4795 	amdgpu_ring_write(ring, cmd);
4796 	amdgpu_ring_write(ring, reg);
4797 	amdgpu_ring_write(ring, 0);
4798 	amdgpu_ring_write(ring, val);
4799 }
4800 
4801 static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4802 					uint32_t val, uint32_t mask)
4803 {
4804 	gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4805 }
4806 
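/*
 * CP firmware that supports cp_fw_write_wait can write one register and
 * wait on another in a single WAIT_REG_MEM packet; older firmware falls
 * back to the generic write-then-wait helper.
 */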
4807 static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4808 						   uint32_t reg0, uint32_t reg1,
4809 						   uint32_t ref, uint32_t mask)
4810 {
4811 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4812 	struct amdgpu_device *adev = ring->adev;
	bool fw_version_ok = adev->gfx.cp_fw_write_wait;
4816 
4817 	if (fw_version_ok)
4818 		gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4819 				       ref, mask, 0x20);
4820 	else
4821 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4822 							   ref, mask);
4823 }
4824 
4825 static void
4826 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4827 				      uint32_t me, uint32_t pipe,
4828 				      enum amdgpu_interrupt_state state)
4829 {
4830 	uint32_t cp_int_cntl, cp_int_cntl_reg;
4831 
4832 	if (!me) {
4833 		switch (pipe) {
4834 		case 0:
4835 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0);
4836 			break;
4837 		case 1:
4838 			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1);
4839 			break;
4840 		default:
4841 			DRM_DEBUG("invalid pipe %d\n", pipe);
4842 			return;
4843 		}
4844 	} else {
4845 		DRM_DEBUG("invalid me %d\n", me);
4846 		return;
4847 	}
4848 
4849 	switch (state) {
4850 	case AMDGPU_IRQ_STATE_DISABLE:
4851 		cp_int_cntl = RREG32(cp_int_cntl_reg);
4852 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4853 					    TIME_STAMP_INT_ENABLE, 0);
4854 		WREG32(cp_int_cntl_reg, cp_int_cntl);
4855 		break;
4856 	case AMDGPU_IRQ_STATE_ENABLE:
4857 		cp_int_cntl = RREG32(cp_int_cntl_reg);
4858 		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4859 					    TIME_STAMP_INT_ENABLE, 1);
4860 		WREG32(cp_int_cntl_reg, cp_int_cntl);
4861 		break;
4862 	default:
4863 		break;
4864 	}
4865 }
4866 
4867 static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4868 						     int me, int pipe,
4869 						     enum amdgpu_interrupt_state state)
4870 {
4871 	u32 mec_int_cntl, mec_int_cntl_reg;
4872 
4873 	/*
4874 	 * amdgpu controls only the first MEC. That's why this function only
4875 	 * handles the setting of interrupts for this specific MEC. All other
4876 	 * pipes' interrupts are set by amdkfd.
4877 	 */
4878 
4879 	if (me == 1) {
4880 		switch (pipe) {
4881 		case 0:
4882 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4883 			break;
4884 		case 1:
4885 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4886 			break;
4887 		case 2:
4888 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4889 			break;
4890 		case 3:
4891 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4892 			break;
4893 		default:
4894 			DRM_DEBUG("invalid pipe %d\n", pipe);
4895 			return;
4896 		}
4897 	} else {
4898 		DRM_DEBUG("invalid me %d\n", me);
4899 		return;
4900 	}
4901 
4902 	switch (state) {
4903 	case AMDGPU_IRQ_STATE_DISABLE:
4904 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4905 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4906 					     TIME_STAMP_INT_ENABLE, 0);
4907 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4908 		break;
4909 	case AMDGPU_IRQ_STATE_ENABLE:
4910 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4911 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4912 					     TIME_STAMP_INT_ENABLE, 1);
4913 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4914 		break;
4915 	default:
4916 		break;
4917 	}
4918 }
4919 
4920 static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4921 					    struct amdgpu_irq_src *src,
4922 					    unsigned type,
4923 					    enum amdgpu_interrupt_state state)
4924 {
4925 	switch (type) {
4926 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
4927 		gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
4928 		break;
4929 	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
4930 		gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
4931 		break;
4932 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4933 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4934 		break;
4935 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4936 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4937 		break;
4938 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4939 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4940 		break;
4941 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4942 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4943 		break;
4944 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4945 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4946 		break;
4947 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4948 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4949 		break;
4950 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4951 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4952 		break;
4953 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4954 		gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4955 		break;
4956 	default:
4957 		break;
4958 	}
4959 	return 0;
4960 }
4961 
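/*
 * The IV ring_id encodes the source queue:
 * bits [1:0] = pipe, bits [3:2] = ME, bits [6:4] = queue.
 */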
4962 static int gfx_v10_0_eop_irq(struct amdgpu_device *adev,
4963 			     struct amdgpu_irq_src *source,
4964 			     struct amdgpu_iv_entry *entry)
4965 {
4966 	int i;
4967 	u8 me_id, pipe_id, queue_id;
4968 	struct amdgpu_ring *ring;
4969 
4970 	DRM_DEBUG("IH: CP EOP\n");
4971 	me_id = (entry->ring_id & 0x0c) >> 2;
4972 	pipe_id = (entry->ring_id & 0x03) >> 0;
4973 	queue_id = (entry->ring_id & 0x70) >> 4;
4974 
4975 	switch (me_id) {
4976 	case 0:
4977 		if (pipe_id == 0)
4978 			amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4979 		else
4980 			amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
4981 		break;
4982 	case 1:
4983 	case 2:
4984 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4985 			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting
			 * from VI; the interrupt can only be enabled/disabled
			 * per pipe instead of per queue.
			 */
4989 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4990 				amdgpu_fence_process(ring);
4991 		}
4992 		break;
4993 	}
4994 	return 0;
4995 }
4996 
4997 static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4998 					      struct amdgpu_irq_src *source,
4999 					      unsigned type,
5000 					      enum amdgpu_interrupt_state state)
5001 {
5002 	switch (state) {
5003 	case AMDGPU_IRQ_STATE_DISABLE:
5004 	case AMDGPU_IRQ_STATE_ENABLE:
5005 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5006 			       PRIV_REG_INT_ENABLE,
5007 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5008 		break;
5009 	default:
5010 		break;
5011 	}
5012 
5013 	return 0;
5014 }
5015 
5016 static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5017 					       struct amdgpu_irq_src *source,
5018 					       unsigned type,
5019 					       enum amdgpu_interrupt_state state)
5020 {
5021 	switch (state) {
5022 	case AMDGPU_IRQ_STATE_DISABLE:
5023 	case AMDGPU_IRQ_STATE_ENABLE:
5024 		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5025 			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
5028 		break;
5029 	}
5030 
5031 	return 0;
5032 }
5033 
5034 static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev,
5035 					struct amdgpu_iv_entry *entry)
5036 {
5037 	u8 me_id, pipe_id, queue_id;
5038 	struct amdgpu_ring *ring;
5039 	int i;
5040 
5041 	me_id = (entry->ring_id & 0x0c) >> 2;
5042 	pipe_id = (entry->ring_id & 0x03) >> 0;
5043 	queue_id = (entry->ring_id & 0x70) >> 4;
5044 
5045 	switch (me_id) {
5046 	case 0:
5047 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5048 			ring = &adev->gfx.gfx_ring[i];
			/* we only enable 1 gfx queue per pipe for now */
5050 			if (ring->me == me_id && ring->pipe == pipe_id)
5051 				drm_sched_fault(&ring->sched);
5052 		}
5053 		break;
5054 	case 1:
5055 	case 2:
5056 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5057 			ring = &adev->gfx.compute_ring[i];
5058 			if (ring->me == me_id && ring->pipe == pipe_id &&
5059 			    ring->queue == queue_id)
5060 				drm_sched_fault(&ring->sched);
5061 		}
5062 		break;
5063 	default:
5064 		BUG();
5065 	}
5066 }
5067 
5068 static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev,
5069 				  struct amdgpu_irq_src *source,
5070 				  struct amdgpu_iv_entry *entry)
5071 {
5072 	DRM_ERROR("Illegal register access in command stream\n");
5073 	gfx_v10_0_handle_priv_fault(adev, entry);
5074 	return 0;
5075 }
5076 
5077 static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev,
5078 				   struct amdgpu_irq_src *source,
5079 				   struct amdgpu_iv_entry *entry)
5080 {
5081 	DRM_ERROR("Illegal instruction in command stream\n");
5082 	gfx_v10_0_handle_priv_fault(adev, entry);
5083 	return 0;
5084 }
5085 
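/*
 * The KIQ signals completion through GENERIC2_INT, so enable/disable it
 * both globally in CPC_INT_CNTL and in the KIQ pipe's own INT_CNTL
 * register.
 */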
5086 static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
5087 					     struct amdgpu_irq_src *src,
5088 					     unsigned int type,
5089 					     enum amdgpu_interrupt_state state)
5090 {
5091 	uint32_t tmp, target;
5092 	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5093 
5094 	if (ring->me == 1)
5095 		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5096 	else
5097 		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
5098 	target += ring->pipe;
5099 
5100 	switch (type) {
5101 	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
5102 		if (state == AMDGPU_IRQ_STATE_DISABLE) {
5103 			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5104 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5105 					    GENERIC2_INT_ENABLE, 0);
5106 			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5107 
5108 			tmp = RREG32(target);
5109 			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5110 					    GENERIC2_INT_ENABLE, 0);
5111 			WREG32(target, tmp);
5112 		} else {
5113 			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
5114 			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
5115 					    GENERIC2_INT_ENABLE, 1);
5116 			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
5117 
5118 			tmp = RREG32(target);
5119 			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
5120 					    GENERIC2_INT_ENABLE, 1);
5121 			WREG32(target, tmp);
5122 		}
5123 		break;
5124 	default:
		BUG(); /* KIQ only supports GENERIC2_INT now */
5126 		break;
5127 	}
5128 	return 0;
5129 }
5130 
5131 static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
5132 			     struct amdgpu_irq_src *source,
5133 			     struct amdgpu_iv_entry *entry)
5134 {
5135 	u8 me_id, pipe_id, queue_id;
5136 	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
5137 
5138 	me_id = (entry->ring_id & 0x0c) >> 2;
5139 	pipe_id = (entry->ring_id & 0x03) >> 0;
5140 	queue_id = (entry->ring_id & 0x70) >> 4;
5141 	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
5142 		   me_id, pipe_id, queue_id);
5143 
5144 	amdgpu_fence_process(ring);
5145 	return 0;
5146 }
5147 
5148 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
5149 	.name = "gfx_v10_0",
5150 	.early_init = gfx_v10_0_early_init,
5151 	.late_init = gfx_v10_0_late_init,
5152 	.sw_init = gfx_v10_0_sw_init,
5153 	.sw_fini = gfx_v10_0_sw_fini,
5154 	.hw_init = gfx_v10_0_hw_init,
5155 	.hw_fini = gfx_v10_0_hw_fini,
5156 	.suspend = gfx_v10_0_suspend,
5157 	.resume = gfx_v10_0_resume,
5158 	.is_idle = gfx_v10_0_is_idle,
5159 	.wait_for_idle = gfx_v10_0_wait_for_idle,
5160 	.soft_reset = gfx_v10_0_soft_reset,
5161 	.set_clockgating_state = gfx_v10_0_set_clockgating_state,
5162 	.set_powergating_state = gfx_v10_0_set_powergating_state,
5163 	.get_clockgating_state = gfx_v10_0_get_clockgating_state,
5164 };
5165 
5166 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5167 	.type = AMDGPU_RING_TYPE_GFX,
5168 	.align_mask = 0xff,
5169 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5170 	.support_64bit_ptrs = true,
5171 	.vmhub = AMDGPU_GFXHUB_0,
5172 	.get_rptr = gfx_v10_0_ring_get_rptr_gfx,
5173 	.get_wptr = gfx_v10_0_ring_get_wptr_gfx,
5174 	.set_wptr = gfx_v10_0_ring_set_wptr_gfx,
	.emit_frame_size = /* 242 DWs maximum if 16 IBs */
5176 		5 + /* COND_EXEC */
5177 		7 + /* PIPELINE_SYNC */
5178 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5179 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5180 		2 + /* VM_FLUSH */
5181 		8 + /* FENCE for VM_FLUSH */
5182 		20 + /* GDS switch */
5183 		4 + /* double SWITCH_BUFFER,
5184 		     * the first COND_EXEC jump to the place
5185 		     * just prior to this double SWITCH_BUFFER
5186 		     */
5187 		5 + /* COND_EXEC */
5188 		7 + /* HDP_flush */
5189 		4 + /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
5192 		3 + /* CNTX_CTRL */
5193 		5 + /* HDP_INVL */
5194 		8 + 8 + /* FENCE x2 */
5195 		2, /* SWITCH_BUFFER */
5196 	.emit_ib_size =	4, /* gfx_v10_0_ring_emit_ib_gfx */
5197 	.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
5198 	.emit_fence = gfx_v10_0_ring_emit_fence,
5199 	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5200 	.emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5201 	.emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5202 	.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5203 	.test_ring = gfx_v10_0_ring_test_ring,
5204 	.test_ib = gfx_v10_0_ring_test_ib,
5205 	.insert_nop = amdgpu_ring_insert_nop,
5206 	.pad_ib = amdgpu_ring_generic_pad_ib,
5207 	.emit_switch_buffer = gfx_v10_0_ring_emit_sb,
5208 	.emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
5209 	.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
5210 	.patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
5211 	.preempt_ib = gfx_v10_0_ring_preempt_ib,
5212 	.emit_tmz = gfx_v10_0_ring_emit_tmz,
5213 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
5214 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5215 	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5216 };
5217 
5218 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5219 	.type = AMDGPU_RING_TYPE_COMPUTE,
5220 	.align_mask = 0xff,
5221 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5222 	.support_64bit_ptrs = true,
5223 	.vmhub = AMDGPU_GFXHUB_0,
5224 	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
5225 	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
5226 	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
5227 	.emit_frame_size =
5228 		20 + /* gfx_v10_0_ring_emit_gds_switch */
5229 		7 + /* gfx_v10_0_ring_emit_hdp_flush */
5230 		5 + /* hdp invalidate */
5231 		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5232 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5233 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5234 		2 + /* gfx_v10_0_ring_emit_vm_flush */
5235 		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
5236 	.emit_ib_size =	7, /* gfx_v10_0_ring_emit_ib_compute */
5237 	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
5238 	.emit_fence = gfx_v10_0_ring_emit_fence,
5239 	.emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync,
5240 	.emit_vm_flush = gfx_v10_0_ring_emit_vm_flush,
5241 	.emit_gds_switch = gfx_v10_0_ring_emit_gds_switch,
5242 	.emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
5243 	.test_ring = gfx_v10_0_ring_test_ring,
5244 	.test_ib = gfx_v10_0_ring_test_ib,
5245 	.insert_nop = amdgpu_ring_insert_nop,
5246 	.pad_ib = amdgpu_ring_generic_pad_ib,
5247 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
5248 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5249 	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5250 };
5251 
5252 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5253 	.type = AMDGPU_RING_TYPE_KIQ,
5254 	.align_mask = 0xff,
5255 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5256 	.support_64bit_ptrs = true,
5257 	.vmhub = AMDGPU_GFXHUB_0,
5258 	.get_rptr = gfx_v10_0_ring_get_rptr_compute,
5259 	.get_wptr = gfx_v10_0_ring_get_wptr_compute,
5260 	.set_wptr = gfx_v10_0_ring_set_wptr_compute,
5261 	.emit_frame_size =
5262 		20 + /* gfx_v10_0_ring_emit_gds_switch */
5263 		7 + /* gfx_v10_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
5265 		7 + /* gfx_v10_0_ring_emit_pipeline_sync */
5266 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
5267 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
5268 		2 + /* gfx_v10_0_ring_emit_vm_flush */
5269 		8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
5270 	.emit_ib_size =	7, /* gfx_v10_0_ring_emit_ib_compute */
5271 	.emit_ib = gfx_v10_0_ring_emit_ib_compute,
5272 	.emit_fence = gfx_v10_0_ring_emit_fence_kiq,
5273 	.test_ring = gfx_v10_0_ring_test_ring,
5274 	.test_ib = gfx_v10_0_ring_test_ib,
5275 	.insert_nop = amdgpu_ring_insert_nop,
5276 	.pad_ib = amdgpu_ring_generic_pad_ib,
5277 	.emit_rreg = gfx_v10_0_ring_emit_rreg,
5278 	.emit_wreg = gfx_v10_0_ring_emit_wreg,
5279 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5280 	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5281 };
5282 
5283 static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
5284 {
5285 	int i;
5286 
5287 	adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
5288 
5289 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5290 		adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
5291 
5292 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5293 		adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
5294 }
5295 
5296 static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
5297 	.set = gfx_v10_0_set_eop_interrupt_state,
5298 	.process = gfx_v10_0_eop_irq,
5299 };
5300 
5301 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
5302 	.set = gfx_v10_0_set_priv_reg_fault_state,
5303 	.process = gfx_v10_0_priv_reg_irq,
5304 };
5305 
5306 static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
5307 	.set = gfx_v10_0_set_priv_inst_fault_state,
5308 	.process = gfx_v10_0_priv_inst_irq,
5309 };
5310 
5311 static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
5312 	.set = gfx_v10_0_kiq_set_interrupt_state,
5313 	.process = gfx_v10_0_kiq_irq,
5314 };
5315 
5316 static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
5317 {
5318 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5319 	adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
5320 
5321 	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
5322 	adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
5323 
5324 	adev->gfx.priv_reg_irq.num_types = 1;
5325 	adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
5326 
5327 	adev->gfx.priv_inst_irq.num_types = 1;
5328 	adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
5329 }
5330 
5331 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
5332 {
5333 	switch (adev->asic_type) {
5334 	case CHIP_NAVI10:
5335 	case CHIP_NAVI14:
5336 	case CHIP_NAVI12:
5337 		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
5338 		break;
5339 	default:
5340 		break;
5341 	}
5342 }
5343 
5344 static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
5345 {
5346 	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
5347 			    adev->gfx.config.max_sh_per_se *
5348 			    adev->gfx.config.max_shader_engines;
5349 
5350 	adev->gds.gds_size = 0x10000;
5351 	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
5352 	adev->gds.gws_size = 64;
5353 	adev->gds.oa_size = 16;
5354 }
5355 
5356 static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
5357 							  u32 bitmap)
5358 {
5359 	u32 data;
5360 
5361 	if (!bitmap)
5362 		return;
5363 
5364 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5365 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5366 
5367 	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
5368 }
5369 
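/*
 * Combine the hardware (CC) and user (GC_USER) inactive-WGP masks for
 * the currently selected SH and invert against the theoretical WGP mask
 * (max_cu_per_sh / 2 WGPs) to obtain the active WGP bitmap.
 */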
5370 static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
5371 {
5372 	u32 data, wgp_bitmask;
5373 	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
5374 	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
5375 
5376 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
5377 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
5378 
5379 	wgp_bitmask =
5380 		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
5381 
5382 	return (~data) & wgp_bitmask;
5383 }
5384 
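/*
 * On gfx10, CUs are paired into WGPs: expand the active-WGP bitmap into
 * a CU bitmap by mapping WGP bit i to CU bits 2i and 2i+1.
 */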
5385 static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
5386 {
5387 	u32 wgp_idx, wgp_active_bitmap;
5388 	u32 cu_bitmap_per_wgp, cu_active_bitmap;
5389 
5390 	wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
5391 	cu_active_bitmap = 0;
5392 
5393 	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if one WGP is enabled, its 2 CUs will be enabled */
5395 		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
5396 		if (wgp_active_bitmap & (1 << wgp_idx))
5397 			cu_active_bitmap |= cu_bitmap_per_wgp;
5398 	}
5399 
5400 	return cu_active_bitmap;
5401 }
5402 
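/*
 * Walk all SE/SH instances, apply the user-specified CU disable masks,
 * and fill @cu_info with the per-SH CU bitmaps, the total active CU
 * count and the always-on CU mask.
 */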
5403 static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
5404 				 struct amdgpu_cu_info *cu_info)
5405 {
5406 	int i, j, k, counter, active_cu_number = 0;
5407 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5408 	unsigned disable_masks[4 * 2];
5409 
5410 	if (!adev || !cu_info)
5411 		return -EINVAL;
5412 
5413 	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5414 
5415 	mutex_lock(&adev->grbm_idx_mutex);
5416 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5417 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5418 			mask = 1;
5419 			ao_bitmap = 0;
5420 			counter = 0;
5421 			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
5422 			if (i < 4 && j < 2)
5423 				gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
5424 					adev, disable_masks[i * 2 + j]);
5425 			bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
5426 			cu_info->bitmap[i][j] = bitmap;
5427 
5428 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5429 				if (bitmap & mask) {
5430 					if (counter < adev->gfx.config.max_cu_per_sh)
5431 						ao_bitmap |= mask;
5432 					counter++;
5433 				}
5434 				mask <<= 1;
5435 			}
5436 			active_cu_number += counter;
5437 			if (i < 2 && j < 2)
5438 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5439 			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5440 		}
5441 	}
5442 	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5443 	mutex_unlock(&adev->grbm_idx_mutex);
5444 
5445 	cu_info->number = active_cu_number;
5446 	cu_info->ao_cu_mask = ao_cu_mask;
5447 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5448 
5449 	return 0;
5450 }
5451 
5452 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
5453 {
5454 	.type = AMD_IP_BLOCK_TYPE_GFX,
5455 	.major = 10,
5456 	.minor = 0,
5457 	.rev = 0,
5458 	.funcs = &gfx_v10_0_ip_funcs,
5459 };
5460