xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c (revision 3213486f)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <drm/drmP.h>
25 #include "amdgpu.h"
26 #include "amdgpu_ih.h"
27 #include "amdgpu_gfx.h"
28 #include "cikd.h"
29 #include "cik.h"
30 #include "cik_structs.h"
31 #include "atom.h"
32 #include "amdgpu_ucode.h"
33 #include "clearstate_ci.h"
34 
35 #include "dce/dce_8_0_d.h"
36 #include "dce/dce_8_0_sh_mask.h"
37 
38 #include "bif/bif_4_1_d.h"
39 #include "bif/bif_4_1_sh_mask.h"
40 
41 #include "gca/gfx_7_0_d.h"
42 #include "gca/gfx_7_2_enum.h"
43 #include "gca/gfx_7_2_sh_mask.h"
44 
45 #include "gmc/gmc_7_0_d.h"
46 #include "gmc/gmc_7_0_sh_mask.h"
47 
48 #include "oss/oss_2_0_d.h"
49 #include "oss/oss_2_0_sh_mask.h"
50 
51 #define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */
52 
53 #define GFX7_NUM_GFX_RINGS     1
54 #define GFX7_MEC_HPD_SIZE      2048
55 
56 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
57 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
58 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
59 
60 MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
61 MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
62 MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
63 MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
64 MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
65 
66 MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
67 MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
68 MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
69 MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
70 MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
71 
72 MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
73 MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
74 MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
75 MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
76 MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
77 MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
78 
79 MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
80 MODULE_FIRMWARE("amdgpu/kabini_me.bin");
81 MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
82 MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
83 MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
84 
85 MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
86 MODULE_FIRMWARE("amdgpu/mullins_me.bin");
87 MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
88 MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
89 MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
90 
91 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
92 {
93 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
94 	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
95 	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
96 	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
97 	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
98 	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
99 	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
100 	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
101 	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
102 	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
103 	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
104 	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
105 	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
106 	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
107 	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
108 	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
109 };
110 
111 static const u32 spectre_rlc_save_restore_register_list[] =
112 {
113 	(0x0e00 << 16) | (0xc12c >> 2),
114 	0x00000000,
115 	(0x0e00 << 16) | (0xc140 >> 2),
116 	0x00000000,
117 	(0x0e00 << 16) | (0xc150 >> 2),
118 	0x00000000,
119 	(0x0e00 << 16) | (0xc15c >> 2),
120 	0x00000000,
121 	(0x0e00 << 16) | (0xc168 >> 2),
122 	0x00000000,
123 	(0x0e00 << 16) | (0xc170 >> 2),
124 	0x00000000,
125 	(0x0e00 << 16) | (0xc178 >> 2),
126 	0x00000000,
127 	(0x0e00 << 16) | (0xc204 >> 2),
128 	0x00000000,
129 	(0x0e00 << 16) | (0xc2b4 >> 2),
130 	0x00000000,
131 	(0x0e00 << 16) | (0xc2b8 >> 2),
132 	0x00000000,
133 	(0x0e00 << 16) | (0xc2bc >> 2),
134 	0x00000000,
135 	(0x0e00 << 16) | (0xc2c0 >> 2),
136 	0x00000000,
137 	(0x0e00 << 16) | (0x8228 >> 2),
138 	0x00000000,
139 	(0x0e00 << 16) | (0x829c >> 2),
140 	0x00000000,
141 	(0x0e00 << 16) | (0x869c >> 2),
142 	0x00000000,
143 	(0x0600 << 16) | (0x98f4 >> 2),
144 	0x00000000,
145 	(0x0e00 << 16) | (0x98f8 >> 2),
146 	0x00000000,
147 	(0x0e00 << 16) | (0x9900 >> 2),
148 	0x00000000,
149 	(0x0e00 << 16) | (0xc260 >> 2),
150 	0x00000000,
151 	(0x0e00 << 16) | (0x90e8 >> 2),
152 	0x00000000,
153 	(0x0e00 << 16) | (0x3c000 >> 2),
154 	0x00000000,
155 	(0x0e00 << 16) | (0x3c00c >> 2),
156 	0x00000000,
157 	(0x0e00 << 16) | (0x8c1c >> 2),
158 	0x00000000,
159 	(0x0e00 << 16) | (0x9700 >> 2),
160 	0x00000000,
161 	(0x0e00 << 16) | (0xcd20 >> 2),
162 	0x00000000,
163 	(0x4e00 << 16) | (0xcd20 >> 2),
164 	0x00000000,
165 	(0x5e00 << 16) | (0xcd20 >> 2),
166 	0x00000000,
167 	(0x6e00 << 16) | (0xcd20 >> 2),
168 	0x00000000,
169 	(0x7e00 << 16) | (0xcd20 >> 2),
170 	0x00000000,
171 	(0x8e00 << 16) | (0xcd20 >> 2),
172 	0x00000000,
173 	(0x9e00 << 16) | (0xcd20 >> 2),
174 	0x00000000,
175 	(0xae00 << 16) | (0xcd20 >> 2),
176 	0x00000000,
177 	(0xbe00 << 16) | (0xcd20 >> 2),
178 	0x00000000,
179 	(0x0e00 << 16) | (0x89bc >> 2),
180 	0x00000000,
181 	(0x0e00 << 16) | (0x8900 >> 2),
182 	0x00000000,
183 	0x3,
184 	(0x0e00 << 16) | (0xc130 >> 2),
185 	0x00000000,
186 	(0x0e00 << 16) | (0xc134 >> 2),
187 	0x00000000,
188 	(0x0e00 << 16) | (0xc1fc >> 2),
189 	0x00000000,
190 	(0x0e00 << 16) | (0xc208 >> 2),
191 	0x00000000,
192 	(0x0e00 << 16) | (0xc264 >> 2),
193 	0x00000000,
194 	(0x0e00 << 16) | (0xc268 >> 2),
195 	0x00000000,
196 	(0x0e00 << 16) | (0xc26c >> 2),
197 	0x00000000,
198 	(0x0e00 << 16) | (0xc270 >> 2),
199 	0x00000000,
200 	(0x0e00 << 16) | (0xc274 >> 2),
201 	0x00000000,
202 	(0x0e00 << 16) | (0xc278 >> 2),
203 	0x00000000,
204 	(0x0e00 << 16) | (0xc27c >> 2),
205 	0x00000000,
206 	(0x0e00 << 16) | (0xc280 >> 2),
207 	0x00000000,
208 	(0x0e00 << 16) | (0xc284 >> 2),
209 	0x00000000,
210 	(0x0e00 << 16) | (0xc288 >> 2),
211 	0x00000000,
212 	(0x0e00 << 16) | (0xc28c >> 2),
213 	0x00000000,
214 	(0x0e00 << 16) | (0xc290 >> 2),
215 	0x00000000,
216 	(0x0e00 << 16) | (0xc294 >> 2),
217 	0x00000000,
218 	(0x0e00 << 16) | (0xc298 >> 2),
219 	0x00000000,
220 	(0x0e00 << 16) | (0xc29c >> 2),
221 	0x00000000,
222 	(0x0e00 << 16) | (0xc2a0 >> 2),
223 	0x00000000,
224 	(0x0e00 << 16) | (0xc2a4 >> 2),
225 	0x00000000,
226 	(0x0e00 << 16) | (0xc2a8 >> 2),
227 	0x00000000,
228 	(0x0e00 << 16) | (0xc2ac >> 2),
229 	0x00000000,
230 	(0x0e00 << 16) | (0xc2b0 >> 2),
231 	0x00000000,
232 	(0x0e00 << 16) | (0x301d0 >> 2),
233 	0x00000000,
234 	(0x0e00 << 16) | (0x30238 >> 2),
235 	0x00000000,
236 	(0x0e00 << 16) | (0x30250 >> 2),
237 	0x00000000,
238 	(0x0e00 << 16) | (0x30254 >> 2),
239 	0x00000000,
240 	(0x0e00 << 16) | (0x30258 >> 2),
241 	0x00000000,
242 	(0x0e00 << 16) | (0x3025c >> 2),
243 	0x00000000,
244 	(0x4e00 << 16) | (0xc900 >> 2),
245 	0x00000000,
246 	(0x5e00 << 16) | (0xc900 >> 2),
247 	0x00000000,
248 	(0x6e00 << 16) | (0xc900 >> 2),
249 	0x00000000,
250 	(0x7e00 << 16) | (0xc900 >> 2),
251 	0x00000000,
252 	(0x8e00 << 16) | (0xc900 >> 2),
253 	0x00000000,
254 	(0x9e00 << 16) | (0xc900 >> 2),
255 	0x00000000,
256 	(0xae00 << 16) | (0xc900 >> 2),
257 	0x00000000,
258 	(0xbe00 << 16) | (0xc900 >> 2),
259 	0x00000000,
260 	(0x4e00 << 16) | (0xc904 >> 2),
261 	0x00000000,
262 	(0x5e00 << 16) | (0xc904 >> 2),
263 	0x00000000,
264 	(0x6e00 << 16) | (0xc904 >> 2),
265 	0x00000000,
266 	(0x7e00 << 16) | (0xc904 >> 2),
267 	0x00000000,
268 	(0x8e00 << 16) | (0xc904 >> 2),
269 	0x00000000,
270 	(0x9e00 << 16) | (0xc904 >> 2),
271 	0x00000000,
272 	(0xae00 << 16) | (0xc904 >> 2),
273 	0x00000000,
274 	(0xbe00 << 16) | (0xc904 >> 2),
275 	0x00000000,
276 	(0x4e00 << 16) | (0xc908 >> 2),
277 	0x00000000,
278 	(0x5e00 << 16) | (0xc908 >> 2),
279 	0x00000000,
280 	(0x6e00 << 16) | (0xc908 >> 2),
281 	0x00000000,
282 	(0x7e00 << 16) | (0xc908 >> 2),
283 	0x00000000,
284 	(0x8e00 << 16) | (0xc908 >> 2),
285 	0x00000000,
286 	(0x9e00 << 16) | (0xc908 >> 2),
287 	0x00000000,
288 	(0xae00 << 16) | (0xc908 >> 2),
289 	0x00000000,
290 	(0xbe00 << 16) | (0xc908 >> 2),
291 	0x00000000,
292 	(0x4e00 << 16) | (0xc90c >> 2),
293 	0x00000000,
294 	(0x5e00 << 16) | (0xc90c >> 2),
295 	0x00000000,
296 	(0x6e00 << 16) | (0xc90c >> 2),
297 	0x00000000,
298 	(0x7e00 << 16) | (0xc90c >> 2),
299 	0x00000000,
300 	(0x8e00 << 16) | (0xc90c >> 2),
301 	0x00000000,
302 	(0x9e00 << 16) | (0xc90c >> 2),
303 	0x00000000,
304 	(0xae00 << 16) | (0xc90c >> 2),
305 	0x00000000,
306 	(0xbe00 << 16) | (0xc90c >> 2),
307 	0x00000000,
308 	(0x4e00 << 16) | (0xc910 >> 2),
309 	0x00000000,
310 	(0x5e00 << 16) | (0xc910 >> 2),
311 	0x00000000,
312 	(0x6e00 << 16) | (0xc910 >> 2),
313 	0x00000000,
314 	(0x7e00 << 16) | (0xc910 >> 2),
315 	0x00000000,
316 	(0x8e00 << 16) | (0xc910 >> 2),
317 	0x00000000,
318 	(0x9e00 << 16) | (0xc910 >> 2),
319 	0x00000000,
320 	(0xae00 << 16) | (0xc910 >> 2),
321 	0x00000000,
322 	(0xbe00 << 16) | (0xc910 >> 2),
323 	0x00000000,
324 	(0x0e00 << 16) | (0xc99c >> 2),
325 	0x00000000,
326 	(0x0e00 << 16) | (0x9834 >> 2),
327 	0x00000000,
328 	(0x0000 << 16) | (0x30f00 >> 2),
329 	0x00000000,
330 	(0x0001 << 16) | (0x30f00 >> 2),
331 	0x00000000,
332 	(0x0000 << 16) | (0x30f04 >> 2),
333 	0x00000000,
334 	(0x0001 << 16) | (0x30f04 >> 2),
335 	0x00000000,
336 	(0x0000 << 16) | (0x30f08 >> 2),
337 	0x00000000,
338 	(0x0001 << 16) | (0x30f08 >> 2),
339 	0x00000000,
340 	(0x0000 << 16) | (0x30f0c >> 2),
341 	0x00000000,
342 	(0x0001 << 16) | (0x30f0c >> 2),
343 	0x00000000,
344 	(0x0600 << 16) | (0x9b7c >> 2),
345 	0x00000000,
346 	(0x0e00 << 16) | (0x8a14 >> 2),
347 	0x00000000,
348 	(0x0e00 << 16) | (0x8a18 >> 2),
349 	0x00000000,
350 	(0x0600 << 16) | (0x30a00 >> 2),
351 	0x00000000,
352 	(0x0e00 << 16) | (0x8bf0 >> 2),
353 	0x00000000,
354 	(0x0e00 << 16) | (0x8bcc >> 2),
355 	0x00000000,
356 	(0x0e00 << 16) | (0x8b24 >> 2),
357 	0x00000000,
358 	(0x0e00 << 16) | (0x30a04 >> 2),
359 	0x00000000,
360 	(0x0600 << 16) | (0x30a10 >> 2),
361 	0x00000000,
362 	(0x0600 << 16) | (0x30a14 >> 2),
363 	0x00000000,
364 	(0x0600 << 16) | (0x30a18 >> 2),
365 	0x00000000,
366 	(0x0600 << 16) | (0x30a2c >> 2),
367 	0x00000000,
368 	(0x0e00 << 16) | (0xc700 >> 2),
369 	0x00000000,
370 	(0x0e00 << 16) | (0xc704 >> 2),
371 	0x00000000,
372 	(0x0e00 << 16) | (0xc708 >> 2),
373 	0x00000000,
374 	(0x0e00 << 16) | (0xc768 >> 2),
375 	0x00000000,
376 	(0x0400 << 16) | (0xc770 >> 2),
377 	0x00000000,
378 	(0x0400 << 16) | (0xc774 >> 2),
379 	0x00000000,
380 	(0x0400 << 16) | (0xc778 >> 2),
381 	0x00000000,
382 	(0x0400 << 16) | (0xc77c >> 2),
383 	0x00000000,
384 	(0x0400 << 16) | (0xc780 >> 2),
385 	0x00000000,
386 	(0x0400 << 16) | (0xc784 >> 2),
387 	0x00000000,
388 	(0x0400 << 16) | (0xc788 >> 2),
389 	0x00000000,
390 	(0x0400 << 16) | (0xc78c >> 2),
391 	0x00000000,
392 	(0x0400 << 16) | (0xc798 >> 2),
393 	0x00000000,
394 	(0x0400 << 16) | (0xc79c >> 2),
395 	0x00000000,
396 	(0x0400 << 16) | (0xc7a0 >> 2),
397 	0x00000000,
398 	(0x0400 << 16) | (0xc7a4 >> 2),
399 	0x00000000,
400 	(0x0400 << 16) | (0xc7a8 >> 2),
401 	0x00000000,
402 	(0x0400 << 16) | (0xc7ac >> 2),
403 	0x00000000,
404 	(0x0400 << 16) | (0xc7b0 >> 2),
405 	0x00000000,
406 	(0x0400 << 16) | (0xc7b4 >> 2),
407 	0x00000000,
408 	(0x0e00 << 16) | (0x9100 >> 2),
409 	0x00000000,
410 	(0x0e00 << 16) | (0x3c010 >> 2),
411 	0x00000000,
412 	(0x0e00 << 16) | (0x92a8 >> 2),
413 	0x00000000,
414 	(0x0e00 << 16) | (0x92ac >> 2),
415 	0x00000000,
416 	(0x0e00 << 16) | (0x92b4 >> 2),
417 	0x00000000,
418 	(0x0e00 << 16) | (0x92b8 >> 2),
419 	0x00000000,
420 	(0x0e00 << 16) | (0x92bc >> 2),
421 	0x00000000,
422 	(0x0e00 << 16) | (0x92c0 >> 2),
423 	0x00000000,
424 	(0x0e00 << 16) | (0x92c4 >> 2),
425 	0x00000000,
426 	(0x0e00 << 16) | (0x92c8 >> 2),
427 	0x00000000,
428 	(0x0e00 << 16) | (0x92cc >> 2),
429 	0x00000000,
430 	(0x0e00 << 16) | (0x92d0 >> 2),
431 	0x00000000,
432 	(0x0e00 << 16) | (0x8c00 >> 2),
433 	0x00000000,
434 	(0x0e00 << 16) | (0x8c04 >> 2),
435 	0x00000000,
436 	(0x0e00 << 16) | (0x8c20 >> 2),
437 	0x00000000,
438 	(0x0e00 << 16) | (0x8c38 >> 2),
439 	0x00000000,
440 	(0x0e00 << 16) | (0x8c3c >> 2),
441 	0x00000000,
442 	(0x0e00 << 16) | (0xae00 >> 2),
443 	0x00000000,
444 	(0x0e00 << 16) | (0x9604 >> 2),
445 	0x00000000,
446 	(0x0e00 << 16) | (0xac08 >> 2),
447 	0x00000000,
448 	(0x0e00 << 16) | (0xac0c >> 2),
449 	0x00000000,
450 	(0x0e00 << 16) | (0xac10 >> 2),
451 	0x00000000,
452 	(0x0e00 << 16) | (0xac14 >> 2),
453 	0x00000000,
454 	(0x0e00 << 16) | (0xac58 >> 2),
455 	0x00000000,
456 	(0x0e00 << 16) | (0xac68 >> 2),
457 	0x00000000,
458 	(0x0e00 << 16) | (0xac6c >> 2),
459 	0x00000000,
460 	(0x0e00 << 16) | (0xac70 >> 2),
461 	0x00000000,
462 	(0x0e00 << 16) | (0xac74 >> 2),
463 	0x00000000,
464 	(0x0e00 << 16) | (0xac78 >> 2),
465 	0x00000000,
466 	(0x0e00 << 16) | (0xac7c >> 2),
467 	0x00000000,
468 	(0x0e00 << 16) | (0xac80 >> 2),
469 	0x00000000,
470 	(0x0e00 << 16) | (0xac84 >> 2),
471 	0x00000000,
472 	(0x0e00 << 16) | (0xac88 >> 2),
473 	0x00000000,
474 	(0x0e00 << 16) | (0xac8c >> 2),
475 	0x00000000,
476 	(0x0e00 << 16) | (0x970c >> 2),
477 	0x00000000,
478 	(0x0e00 << 16) | (0x9714 >> 2),
479 	0x00000000,
480 	(0x0e00 << 16) | (0x9718 >> 2),
481 	0x00000000,
482 	(0x0e00 << 16) | (0x971c >> 2),
483 	0x00000000,
484 	(0x0e00 << 16) | (0x31068 >> 2),
485 	0x00000000,
486 	(0x4e00 << 16) | (0x31068 >> 2),
487 	0x00000000,
488 	(0x5e00 << 16) | (0x31068 >> 2),
489 	0x00000000,
490 	(0x6e00 << 16) | (0x31068 >> 2),
491 	0x00000000,
492 	(0x7e00 << 16) | (0x31068 >> 2),
493 	0x00000000,
494 	(0x8e00 << 16) | (0x31068 >> 2),
495 	0x00000000,
496 	(0x9e00 << 16) | (0x31068 >> 2),
497 	0x00000000,
498 	(0xae00 << 16) | (0x31068 >> 2),
499 	0x00000000,
500 	(0xbe00 << 16) | (0x31068 >> 2),
501 	0x00000000,
502 	(0x0e00 << 16) | (0xcd10 >> 2),
503 	0x00000000,
504 	(0x0e00 << 16) | (0xcd14 >> 2),
505 	0x00000000,
506 	(0x0e00 << 16) | (0x88b0 >> 2),
507 	0x00000000,
508 	(0x0e00 << 16) | (0x88b4 >> 2),
509 	0x00000000,
510 	(0x0e00 << 16) | (0x88b8 >> 2),
511 	0x00000000,
512 	(0x0e00 << 16) | (0x88bc >> 2),
513 	0x00000000,
514 	(0x0400 << 16) | (0x89c0 >> 2),
515 	0x00000000,
516 	(0x0e00 << 16) | (0x88c4 >> 2),
517 	0x00000000,
518 	(0x0e00 << 16) | (0x88c8 >> 2),
519 	0x00000000,
520 	(0x0e00 << 16) | (0x88d0 >> 2),
521 	0x00000000,
522 	(0x0e00 << 16) | (0x88d4 >> 2),
523 	0x00000000,
524 	(0x0e00 << 16) | (0x88d8 >> 2),
525 	0x00000000,
526 	(0x0e00 << 16) | (0x8980 >> 2),
527 	0x00000000,
528 	(0x0e00 << 16) | (0x30938 >> 2),
529 	0x00000000,
530 	(0x0e00 << 16) | (0x3093c >> 2),
531 	0x00000000,
532 	(0x0e00 << 16) | (0x30940 >> 2),
533 	0x00000000,
534 	(0x0e00 << 16) | (0x89a0 >> 2),
535 	0x00000000,
536 	(0x0e00 << 16) | (0x30900 >> 2),
537 	0x00000000,
538 	(0x0e00 << 16) | (0x30904 >> 2),
539 	0x00000000,
540 	(0x0e00 << 16) | (0x89b4 >> 2),
541 	0x00000000,
542 	(0x0e00 << 16) | (0x3c210 >> 2),
543 	0x00000000,
544 	(0x0e00 << 16) | (0x3c214 >> 2),
545 	0x00000000,
546 	(0x0e00 << 16) | (0x3c218 >> 2),
547 	0x00000000,
548 	(0x0e00 << 16) | (0x8904 >> 2),
549 	0x00000000,
550 	0x5,
551 	(0x0e00 << 16) | (0x8c28 >> 2),
552 	(0x0e00 << 16) | (0x8c2c >> 2),
553 	(0x0e00 << 16) | (0x8c30 >> 2),
554 	(0x0e00 << 16) | (0x8c34 >> 2),
555 	(0x0e00 << 16) | (0x9600 >> 2),
556 };
557 
558 static const u32 kalindi_rlc_save_restore_register_list[] =
559 {
560 	(0x0e00 << 16) | (0xc12c >> 2),
561 	0x00000000,
562 	(0x0e00 << 16) | (0xc140 >> 2),
563 	0x00000000,
564 	(0x0e00 << 16) | (0xc150 >> 2),
565 	0x00000000,
566 	(0x0e00 << 16) | (0xc15c >> 2),
567 	0x00000000,
568 	(0x0e00 << 16) | (0xc168 >> 2),
569 	0x00000000,
570 	(0x0e00 << 16) | (0xc170 >> 2),
571 	0x00000000,
572 	(0x0e00 << 16) | (0xc204 >> 2),
573 	0x00000000,
574 	(0x0e00 << 16) | (0xc2b4 >> 2),
575 	0x00000000,
576 	(0x0e00 << 16) | (0xc2b8 >> 2),
577 	0x00000000,
578 	(0x0e00 << 16) | (0xc2bc >> 2),
579 	0x00000000,
580 	(0x0e00 << 16) | (0xc2c0 >> 2),
581 	0x00000000,
582 	(0x0e00 << 16) | (0x8228 >> 2),
583 	0x00000000,
584 	(0x0e00 << 16) | (0x829c >> 2),
585 	0x00000000,
586 	(0x0e00 << 16) | (0x869c >> 2),
587 	0x00000000,
588 	(0x0600 << 16) | (0x98f4 >> 2),
589 	0x00000000,
590 	(0x0e00 << 16) | (0x98f8 >> 2),
591 	0x00000000,
592 	(0x0e00 << 16) | (0x9900 >> 2),
593 	0x00000000,
594 	(0x0e00 << 16) | (0xc260 >> 2),
595 	0x00000000,
596 	(0x0e00 << 16) | (0x90e8 >> 2),
597 	0x00000000,
598 	(0x0e00 << 16) | (0x3c000 >> 2),
599 	0x00000000,
600 	(0x0e00 << 16) | (0x3c00c >> 2),
601 	0x00000000,
602 	(0x0e00 << 16) | (0x8c1c >> 2),
603 	0x00000000,
604 	(0x0e00 << 16) | (0x9700 >> 2),
605 	0x00000000,
606 	(0x0e00 << 16) | (0xcd20 >> 2),
607 	0x00000000,
608 	(0x4e00 << 16) | (0xcd20 >> 2),
609 	0x00000000,
610 	(0x5e00 << 16) | (0xcd20 >> 2),
611 	0x00000000,
612 	(0x6e00 << 16) | (0xcd20 >> 2),
613 	0x00000000,
614 	(0x7e00 << 16) | (0xcd20 >> 2),
615 	0x00000000,
616 	(0x0e00 << 16) | (0x89bc >> 2),
617 	0x00000000,
618 	(0x0e00 << 16) | (0x8900 >> 2),
619 	0x00000000,
620 	0x3,
621 	(0x0e00 << 16) | (0xc130 >> 2),
622 	0x00000000,
623 	(0x0e00 << 16) | (0xc134 >> 2),
624 	0x00000000,
625 	(0x0e00 << 16) | (0xc1fc >> 2),
626 	0x00000000,
627 	(0x0e00 << 16) | (0xc208 >> 2),
628 	0x00000000,
629 	(0x0e00 << 16) | (0xc264 >> 2),
630 	0x00000000,
631 	(0x0e00 << 16) | (0xc268 >> 2),
632 	0x00000000,
633 	(0x0e00 << 16) | (0xc26c >> 2),
634 	0x00000000,
635 	(0x0e00 << 16) | (0xc270 >> 2),
636 	0x00000000,
637 	(0x0e00 << 16) | (0xc274 >> 2),
638 	0x00000000,
639 	(0x0e00 << 16) | (0xc28c >> 2),
640 	0x00000000,
641 	(0x0e00 << 16) | (0xc290 >> 2),
642 	0x00000000,
643 	(0x0e00 << 16) | (0xc294 >> 2),
644 	0x00000000,
645 	(0x0e00 << 16) | (0xc298 >> 2),
646 	0x00000000,
647 	(0x0e00 << 16) | (0xc2a0 >> 2),
648 	0x00000000,
649 	(0x0e00 << 16) | (0xc2a4 >> 2),
650 	0x00000000,
651 	(0x0e00 << 16) | (0xc2a8 >> 2),
652 	0x00000000,
653 	(0x0e00 << 16) | (0xc2ac >> 2),
654 	0x00000000,
655 	(0x0e00 << 16) | (0x301d0 >> 2),
656 	0x00000000,
657 	(0x0e00 << 16) | (0x30238 >> 2),
658 	0x00000000,
659 	(0x0e00 << 16) | (0x30250 >> 2),
660 	0x00000000,
661 	(0x0e00 << 16) | (0x30254 >> 2),
662 	0x00000000,
663 	(0x0e00 << 16) | (0x30258 >> 2),
664 	0x00000000,
665 	(0x0e00 << 16) | (0x3025c >> 2),
666 	0x00000000,
667 	(0x4e00 << 16) | (0xc900 >> 2),
668 	0x00000000,
669 	(0x5e00 << 16) | (0xc900 >> 2),
670 	0x00000000,
671 	(0x6e00 << 16) | (0xc900 >> 2),
672 	0x00000000,
673 	(0x7e00 << 16) | (0xc900 >> 2),
674 	0x00000000,
675 	(0x4e00 << 16) | (0xc904 >> 2),
676 	0x00000000,
677 	(0x5e00 << 16) | (0xc904 >> 2),
678 	0x00000000,
679 	(0x6e00 << 16) | (0xc904 >> 2),
680 	0x00000000,
681 	(0x7e00 << 16) | (0xc904 >> 2),
682 	0x00000000,
683 	(0x4e00 << 16) | (0xc908 >> 2),
684 	0x00000000,
685 	(0x5e00 << 16) | (0xc908 >> 2),
686 	0x00000000,
687 	(0x6e00 << 16) | (0xc908 >> 2),
688 	0x00000000,
689 	(0x7e00 << 16) | (0xc908 >> 2),
690 	0x00000000,
691 	(0x4e00 << 16) | (0xc90c >> 2),
692 	0x00000000,
693 	(0x5e00 << 16) | (0xc90c >> 2),
694 	0x00000000,
695 	(0x6e00 << 16) | (0xc90c >> 2),
696 	0x00000000,
697 	(0x7e00 << 16) | (0xc90c >> 2),
698 	0x00000000,
699 	(0x4e00 << 16) | (0xc910 >> 2),
700 	0x00000000,
701 	(0x5e00 << 16) | (0xc910 >> 2),
702 	0x00000000,
703 	(0x6e00 << 16) | (0xc910 >> 2),
704 	0x00000000,
705 	(0x7e00 << 16) | (0xc910 >> 2),
706 	0x00000000,
707 	(0x0e00 << 16) | (0xc99c >> 2),
708 	0x00000000,
709 	(0x0e00 << 16) | (0x9834 >> 2),
710 	0x00000000,
711 	(0x0000 << 16) | (0x30f00 >> 2),
712 	0x00000000,
713 	(0x0000 << 16) | (0x30f04 >> 2),
714 	0x00000000,
715 	(0x0000 << 16) | (0x30f08 >> 2),
716 	0x00000000,
717 	(0x0000 << 16) | (0x30f0c >> 2),
718 	0x00000000,
719 	(0x0600 << 16) | (0x9b7c >> 2),
720 	0x00000000,
721 	(0x0e00 << 16) | (0x8a14 >> 2),
722 	0x00000000,
723 	(0x0e00 << 16) | (0x8a18 >> 2),
724 	0x00000000,
725 	(0x0600 << 16) | (0x30a00 >> 2),
726 	0x00000000,
727 	(0x0e00 << 16) | (0x8bf0 >> 2),
728 	0x00000000,
729 	(0x0e00 << 16) | (0x8bcc >> 2),
730 	0x00000000,
731 	(0x0e00 << 16) | (0x8b24 >> 2),
732 	0x00000000,
733 	(0x0e00 << 16) | (0x30a04 >> 2),
734 	0x00000000,
735 	(0x0600 << 16) | (0x30a10 >> 2),
736 	0x00000000,
737 	(0x0600 << 16) | (0x30a14 >> 2),
738 	0x00000000,
739 	(0x0600 << 16) | (0x30a18 >> 2),
740 	0x00000000,
741 	(0x0600 << 16) | (0x30a2c >> 2),
742 	0x00000000,
743 	(0x0e00 << 16) | (0xc700 >> 2),
744 	0x00000000,
745 	(0x0e00 << 16) | (0xc704 >> 2),
746 	0x00000000,
747 	(0x0e00 << 16) | (0xc708 >> 2),
748 	0x00000000,
749 	(0x0e00 << 16) | (0xc768 >> 2),
750 	0x00000000,
751 	(0x0400 << 16) | (0xc770 >> 2),
752 	0x00000000,
753 	(0x0400 << 16) | (0xc774 >> 2),
754 	0x00000000,
755 	(0x0400 << 16) | (0xc798 >> 2),
756 	0x00000000,
757 	(0x0400 << 16) | (0xc79c >> 2),
758 	0x00000000,
759 	(0x0e00 << 16) | (0x9100 >> 2),
760 	0x00000000,
761 	(0x0e00 << 16) | (0x3c010 >> 2),
762 	0x00000000,
763 	(0x0e00 << 16) | (0x8c00 >> 2),
764 	0x00000000,
765 	(0x0e00 << 16) | (0x8c04 >> 2),
766 	0x00000000,
767 	(0x0e00 << 16) | (0x8c20 >> 2),
768 	0x00000000,
769 	(0x0e00 << 16) | (0x8c38 >> 2),
770 	0x00000000,
771 	(0x0e00 << 16) | (0x8c3c >> 2),
772 	0x00000000,
773 	(0x0e00 << 16) | (0xae00 >> 2),
774 	0x00000000,
775 	(0x0e00 << 16) | (0x9604 >> 2),
776 	0x00000000,
777 	(0x0e00 << 16) | (0xac08 >> 2),
778 	0x00000000,
779 	(0x0e00 << 16) | (0xac0c >> 2),
780 	0x00000000,
781 	(0x0e00 << 16) | (0xac10 >> 2),
782 	0x00000000,
783 	(0x0e00 << 16) | (0xac14 >> 2),
784 	0x00000000,
785 	(0x0e00 << 16) | (0xac58 >> 2),
786 	0x00000000,
787 	(0x0e00 << 16) | (0xac68 >> 2),
788 	0x00000000,
789 	(0x0e00 << 16) | (0xac6c >> 2),
790 	0x00000000,
791 	(0x0e00 << 16) | (0xac70 >> 2),
792 	0x00000000,
793 	(0x0e00 << 16) | (0xac74 >> 2),
794 	0x00000000,
795 	(0x0e00 << 16) | (0xac78 >> 2),
796 	0x00000000,
797 	(0x0e00 << 16) | (0xac7c >> 2),
798 	0x00000000,
799 	(0x0e00 << 16) | (0xac80 >> 2),
800 	0x00000000,
801 	(0x0e00 << 16) | (0xac84 >> 2),
802 	0x00000000,
803 	(0x0e00 << 16) | (0xac88 >> 2),
804 	0x00000000,
805 	(0x0e00 << 16) | (0xac8c >> 2),
806 	0x00000000,
807 	(0x0e00 << 16) | (0x970c >> 2),
808 	0x00000000,
809 	(0x0e00 << 16) | (0x9714 >> 2),
810 	0x00000000,
811 	(0x0e00 << 16) | (0x9718 >> 2),
812 	0x00000000,
813 	(0x0e00 << 16) | (0x971c >> 2),
814 	0x00000000,
815 	(0x0e00 << 16) | (0x31068 >> 2),
816 	0x00000000,
817 	(0x4e00 << 16) | (0x31068 >> 2),
818 	0x00000000,
819 	(0x5e00 << 16) | (0x31068 >> 2),
820 	0x00000000,
821 	(0x6e00 << 16) | (0x31068 >> 2),
822 	0x00000000,
823 	(0x7e00 << 16) | (0x31068 >> 2),
824 	0x00000000,
825 	(0x0e00 << 16) | (0xcd10 >> 2),
826 	0x00000000,
827 	(0x0e00 << 16) | (0xcd14 >> 2),
828 	0x00000000,
829 	(0x0e00 << 16) | (0x88b0 >> 2),
830 	0x00000000,
831 	(0x0e00 << 16) | (0x88b4 >> 2),
832 	0x00000000,
833 	(0x0e00 << 16) | (0x88b8 >> 2),
834 	0x00000000,
835 	(0x0e00 << 16) | (0x88bc >> 2),
836 	0x00000000,
837 	(0x0400 << 16) | (0x89c0 >> 2),
838 	0x00000000,
839 	(0x0e00 << 16) | (0x88c4 >> 2),
840 	0x00000000,
841 	(0x0e00 << 16) | (0x88c8 >> 2),
842 	0x00000000,
843 	(0x0e00 << 16) | (0x88d0 >> 2),
844 	0x00000000,
845 	(0x0e00 << 16) | (0x88d4 >> 2),
846 	0x00000000,
847 	(0x0e00 << 16) | (0x88d8 >> 2),
848 	0x00000000,
849 	(0x0e00 << 16) | (0x8980 >> 2),
850 	0x00000000,
851 	(0x0e00 << 16) | (0x30938 >> 2),
852 	0x00000000,
853 	(0x0e00 << 16) | (0x3093c >> 2),
854 	0x00000000,
855 	(0x0e00 << 16) | (0x30940 >> 2),
856 	0x00000000,
857 	(0x0e00 << 16) | (0x89a0 >> 2),
858 	0x00000000,
859 	(0x0e00 << 16) | (0x30900 >> 2),
860 	0x00000000,
861 	(0x0e00 << 16) | (0x30904 >> 2),
862 	0x00000000,
863 	(0x0e00 << 16) | (0x89b4 >> 2),
864 	0x00000000,
865 	(0x0e00 << 16) | (0x3e1fc >> 2),
866 	0x00000000,
867 	(0x0e00 << 16) | (0x3c210 >> 2),
868 	0x00000000,
869 	(0x0e00 << 16) | (0x3c214 >> 2),
870 	0x00000000,
871 	(0x0e00 << 16) | (0x3c218 >> 2),
872 	0x00000000,
873 	(0x0e00 << 16) | (0x8904 >> 2),
874 	0x00000000,
875 	0x5,
876 	(0x0e00 << 16) | (0x8c28 >> 2),
877 	(0x0e00 << 16) | (0x8c2c >> 2),
878 	(0x0e00 << 16) | (0x8c30 >> 2),
879 	(0x0e00 << 16) | (0x8c34 >> 2),
880 	(0x0e00 << 16) | (0x9600 >> 2),
881 };
882 
883 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
884 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
885 static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
886 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
887 
888 /*
889  * Core functions
890  */
891 /**
892  * gfx_v7_0_init_microcode - load ucode images from disk
893  *
894  * @adev: amdgpu_device pointer
895  *
896  * Use the firmware interface to load the ucode images into
897  * the driver (not loaded into hw).
898  * Returns 0 on success, error on failure.
899  */
900 static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
901 {
902 	const char *chip_name;
903 	char fw_name[30];
904 	int err;
905 
906 	DRM_DEBUG("\n");
907 
908 	switch (adev->asic_type) {
909 	case CHIP_BONAIRE:
910 		chip_name = "bonaire";
911 		break;
912 	case CHIP_HAWAII:
913 		chip_name = "hawaii";
914 		break;
915 	case CHIP_KAVERI:
916 		chip_name = "kaveri";
917 		break;
918 	case CHIP_KABINI:
919 		chip_name = "kabini";
920 		break;
921 	case CHIP_MULLINS:
922 		chip_name = "mullins";
923 		break;
924 	default: BUG();
925 	}
926 
927 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
928 	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
929 	if (err)
930 		goto out;
931 	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
932 	if (err)
933 		goto out;
934 
935 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
936 	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
937 	if (err)
938 		goto out;
939 	err = amdgpu_ucode_validate(adev->gfx.me_fw);
940 	if (err)
941 		goto out;
942 
943 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
944 	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
945 	if (err)
946 		goto out;
947 	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
948 	if (err)
949 		goto out;
950 
951 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
952 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
953 	if (err)
954 		goto out;
955 	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
956 	if (err)
957 		goto out;
958 
959 	if (adev->asic_type == CHIP_KAVERI) {
960 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
961 		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
962 		if (err)
963 			goto out;
964 		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
965 		if (err)
966 			goto out;
967 	}
968 
969 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
970 	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
971 	if (err)
972 		goto out;
973 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
974 
975 out:
976 	if (err) {
977 		pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
978 		release_firmware(adev->gfx.pfp_fw);
979 		adev->gfx.pfp_fw = NULL;
980 		release_firmware(adev->gfx.me_fw);
981 		adev->gfx.me_fw = NULL;
982 		release_firmware(adev->gfx.ce_fw);
983 		adev->gfx.ce_fw = NULL;
984 		release_firmware(adev->gfx.mec_fw);
985 		adev->gfx.mec_fw = NULL;
986 		release_firmware(adev->gfx.mec2_fw);
987 		adev->gfx.mec2_fw = NULL;
988 		release_firmware(adev->gfx.rlc_fw);
989 		adev->gfx.rlc_fw = NULL;
990 	}
991 	return err;
992 }
993 
994 static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
995 {
996 	release_firmware(adev->gfx.pfp_fw);
997 	adev->gfx.pfp_fw = NULL;
998 	release_firmware(adev->gfx.me_fw);
999 	adev->gfx.me_fw = NULL;
1000 	release_firmware(adev->gfx.ce_fw);
1001 	adev->gfx.ce_fw = NULL;
1002 	release_firmware(adev->gfx.mec_fw);
1003 	adev->gfx.mec_fw = NULL;
1004 	release_firmware(adev->gfx.mec2_fw);
1005 	adev->gfx.mec2_fw = NULL;
1006 	release_firmware(adev->gfx.rlc_fw);
1007 	adev->gfx.rlc_fw = NULL;
1008 }
1009 
1010 /**
1011  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
1012  *
1013  * @adev: amdgpu_device pointer
1014  *
1015  * Starting with SI, the tiling setup is done globally in a
1016  * set of 32 tiling modes.  Rather than selecting each set of
1017  * parameters per surface as on older asics, we just select
1018  * which index in the tiling table we want to use, and the
1019  * surface uses those parameters (CIK).
1020  */
1021 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
1022 {
1023 	const u32 num_tile_mode_states =
1024 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
1025 	const u32 num_secondary_tile_mode_states =
1026 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
1027 	u32 reg_offset, split_equal_to_row_size;
1028 	uint32_t *tile, *macrotile;
1029 
1030 	tile = adev->gfx.config.tile_mode_array;
1031 	macrotile = adev->gfx.config.macrotile_mode_array;
1032 
1033 	switch (adev->gfx.config.mem_row_size_in_kb) {
1034 	case 1:
1035 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
1036 		break;
1037 	case 2:
1038 	default:
1039 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1040 		break;
1041 	case 4:
1042 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1043 		break;
1044 	}
1045 
1046 	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1047 		tile[reg_offset] = 0;
1048 	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1049 		macrotile[reg_offset] = 0;
1050 
1051 	switch (adev->asic_type) {
1052 	case CHIP_BONAIRE:
1053 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1054 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1055 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1056 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1057 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1058 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1059 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1060 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1061 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1062 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1063 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1064 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1065 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1066 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1067 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1068 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1069 		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1070 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1071 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1072 			   TILE_SPLIT(split_equal_to_row_size));
1073 		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1074 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1075 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1076 		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1077 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1078 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1079 			   TILE_SPLIT(split_equal_to_row_size));
1080 		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1081 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1082 			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
1083 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1084 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1085 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1086 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1087 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1088 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1089 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1090 		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1091 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1092 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1093 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1094 		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1095 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1096 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1097 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1098 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1099 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1100 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1101 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1102 		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1103 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1104 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1105 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1106 		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1107 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1108 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1109 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1110 		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1111 		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1112 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1113 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1114 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1115 		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1116 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1117 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1118 		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1119 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1120 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1121 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1122 		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1123 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1124 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1125 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1126 		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1127 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1128 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1129 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1130 		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1131 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1132 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1133 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1134 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1135 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1136 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1137 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1138 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1139 		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1140 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1141 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1142 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1143 		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1144 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1145 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1146 		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1147 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1148 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1149 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1150 		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1151 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1152 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1153 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1154 		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1155 
1156 		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1157 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1158 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1159 				NUM_BANKS(ADDR_SURF_16_BANK));
1160 		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1161 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1162 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1163 				NUM_BANKS(ADDR_SURF_16_BANK));
1164 		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1165 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1166 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1167 				NUM_BANKS(ADDR_SURF_16_BANK));
1168 		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1169 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1170 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1171 				NUM_BANKS(ADDR_SURF_16_BANK));
1172 		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1173 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1174 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1175 				NUM_BANKS(ADDR_SURF_16_BANK));
1176 		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1177 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1178 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1179 				NUM_BANKS(ADDR_SURF_8_BANK));
1180 		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1181 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1182 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1183 				NUM_BANKS(ADDR_SURF_4_BANK));
1184 		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1185 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1186 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1187 				NUM_BANKS(ADDR_SURF_16_BANK));
1188 		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1189 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1190 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1191 				NUM_BANKS(ADDR_SURF_16_BANK));
1192 		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1193 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1194 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1195 				NUM_BANKS(ADDR_SURF_16_BANK));
1196 		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1197 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1198 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1199 				NUM_BANKS(ADDR_SURF_16_BANK));
1200 		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1201 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1202 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1203 				NUM_BANKS(ADDR_SURF_16_BANK));
1204 		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1205 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1206 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1207 				NUM_BANKS(ADDR_SURF_8_BANK));
1208 		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1209 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1210 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1211 				NUM_BANKS(ADDR_SURF_4_BANK));
1212 
1213 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1214 			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1215 		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1216 			if (reg_offset != 7)
1217 				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1218 		break;
1219 	case CHIP_HAWAII:
1220 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1221 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1222 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1223 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1224 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1225 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1226 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1227 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1228 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1229 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1230 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1231 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1232 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1233 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1234 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1235 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1236 		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1237 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1238 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1239 			   TILE_SPLIT(split_equal_to_row_size));
1240 		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1241 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1242 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1243 			   TILE_SPLIT(split_equal_to_row_size));
1244 		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1245 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1246 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1247 			   TILE_SPLIT(split_equal_to_row_size));
1248 		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1249 			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1250 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1251 			   TILE_SPLIT(split_equal_to_row_size));
1252 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1253 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
1254 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1255 			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1256 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1257 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1258 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1259 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1260 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1261 		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1262 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1263 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1264 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1265 		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
1266 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1267 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1268 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1269 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1270 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1271 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1272 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1273 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1274 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1275 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1276 		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1277 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1278 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1279 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1280 		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1281 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1282 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1283 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1284 		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1285 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1286 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1287 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1288 		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1289 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1290 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1291 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1292 		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1293 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1294 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1295 		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1296 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1297 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1298 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1299 		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1300 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1301 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1302 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1303 		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1304 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1305 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1306 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1307 		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1308 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1309 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1310 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1311 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1312 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1313 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1314 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1315 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1316 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1317 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1318 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1319 		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1320 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1321 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1322 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1323 		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1324 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1325 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1326 		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1327 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1328 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1329 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1330 		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1331 			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
1332 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1333 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1334 		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1335 			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
1336 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1337 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1338 
1339 		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1340 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1341 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1342 				NUM_BANKS(ADDR_SURF_16_BANK));
1343 		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1344 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1345 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1346 				NUM_BANKS(ADDR_SURF_16_BANK));
1347 		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1348 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1349 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1350 				NUM_BANKS(ADDR_SURF_16_BANK));
1351 		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1352 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1353 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1354 				NUM_BANKS(ADDR_SURF_16_BANK));
1355 		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1356 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1357 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1358 				NUM_BANKS(ADDR_SURF_8_BANK));
1359 		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1360 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1361 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1362 				NUM_BANKS(ADDR_SURF_4_BANK));
1363 		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1364 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1365 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1366 				NUM_BANKS(ADDR_SURF_4_BANK));
1367 		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1368 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1369 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1370 				NUM_BANKS(ADDR_SURF_16_BANK));
1371 		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1372 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1373 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1374 				NUM_BANKS(ADDR_SURF_16_BANK));
1375 		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1376 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1377 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1378 				NUM_BANKS(ADDR_SURF_16_BANK));
1379 		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1380 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1381 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1382 				NUM_BANKS(ADDR_SURF_8_BANK));
1383 		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1384 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1385 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1386 				NUM_BANKS(ADDR_SURF_16_BANK));
1387 		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1388 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1389 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1390 				NUM_BANKS(ADDR_SURF_8_BANK));
1391 		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1392 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1393 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
1394 				NUM_BANKS(ADDR_SURF_4_BANK));
1395 
1396 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1397 			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1398 		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1399 			if (reg_offset != 7)
1400 				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1401 		break;
1402 	case CHIP_KABINI:
1403 	case CHIP_KAVERI:
1404 	case CHIP_MULLINS:
1405 	default:
1406 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1407 			   PIPE_CONFIG(ADDR_SURF_P2) |
1408 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1409 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1410 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1411 			   PIPE_CONFIG(ADDR_SURF_P2) |
1412 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1413 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1414 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1415 			   PIPE_CONFIG(ADDR_SURF_P2) |
1416 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1417 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1418 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1419 			   PIPE_CONFIG(ADDR_SURF_P2) |
1420 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1421 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1422 		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1423 			   PIPE_CONFIG(ADDR_SURF_P2) |
1424 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1425 			   TILE_SPLIT(split_equal_to_row_size));
1426 		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1427 			   PIPE_CONFIG(ADDR_SURF_P2) |
1428 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
1429 		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1430 			   PIPE_CONFIG(ADDR_SURF_P2) |
1431 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
1432 			   TILE_SPLIT(split_equal_to_row_size));
1433 		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
1434 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1435 			   PIPE_CONFIG(ADDR_SURF_P2));
1436 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1437 			   PIPE_CONFIG(ADDR_SURF_P2) |
1438 			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
1439 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1440 			    PIPE_CONFIG(ADDR_SURF_P2) |
1441 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1442 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1443 		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1444 			    PIPE_CONFIG(ADDR_SURF_P2) |
1445 			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
1446 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1447 		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
1448 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1449 			    PIPE_CONFIG(ADDR_SURF_P2) |
1450 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
1451 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1452 			    PIPE_CONFIG(ADDR_SURF_P2) |
1453 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1454 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1455 		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
1456 			    PIPE_CONFIG(ADDR_SURF_P2) |
1457 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1458 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1459 		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1460 			    PIPE_CONFIG(ADDR_SURF_P2) |
1461 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1462 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1463 		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
1464 		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1465 			    PIPE_CONFIG(ADDR_SURF_P2) |
1466 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1467 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1468 		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
1469 			    PIPE_CONFIG(ADDR_SURF_P2) |
1470 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
1471 		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1472 			    PIPE_CONFIG(ADDR_SURF_P2) |
1473 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1474 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1475 		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
1476 			    PIPE_CONFIG(ADDR_SURF_P2) |
1477 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1478 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1479 		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
1480 			    PIPE_CONFIG(ADDR_SURF_P2) |
1481 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1482 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1483 		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
1484 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
1485 			    PIPE_CONFIG(ADDR_SURF_P2) |
1486 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
1487 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1488 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
1489 			    PIPE_CONFIG(ADDR_SURF_P2) |
1490 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1491 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1492 		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
1493 			    PIPE_CONFIG(ADDR_SURF_P2) |
1494 			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
1495 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
1496 		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1497 			    PIPE_CONFIG(ADDR_SURF_P2) |
1498 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
1499 		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1500 			    PIPE_CONFIG(ADDR_SURF_P2) |
1501 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1502 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
1503 		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
1504 			    PIPE_CONFIG(ADDR_SURF_P2) |
1505 			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
1506 			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
1507 		tile[30] = (TILE_SPLIT(split_equal_to_row_size));
1508 
1509 		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1510 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1511 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1512 				NUM_BANKS(ADDR_SURF_8_BANK));
1513 		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1514 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1515 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1516 				NUM_BANKS(ADDR_SURF_8_BANK));
1517 		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1518 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1519 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1520 				NUM_BANKS(ADDR_SURF_8_BANK));
1521 		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1522 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1523 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1524 				NUM_BANKS(ADDR_SURF_8_BANK));
1525 		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1526 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1527 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1528 				NUM_BANKS(ADDR_SURF_8_BANK));
1529 		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1530 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1531 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1532 				NUM_BANKS(ADDR_SURF_8_BANK));
1533 		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1534 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1535 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1536 				NUM_BANKS(ADDR_SURF_8_BANK));
1537 		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1538 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
1539 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1540 				NUM_BANKS(ADDR_SURF_16_BANK));
1541 		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
1542 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1543 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1544 				NUM_BANKS(ADDR_SURF_16_BANK));
1545 		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1546 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1547 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1548 				NUM_BANKS(ADDR_SURF_16_BANK));
1549 		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1550 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1551 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1552 				NUM_BANKS(ADDR_SURF_16_BANK));
1553 		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1554 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1555 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1556 				NUM_BANKS(ADDR_SURF_16_BANK));
1557 		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1558 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1559 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
1560 				NUM_BANKS(ADDR_SURF_16_BANK));
1561 		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1562 				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1563 				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
1564 				NUM_BANKS(ADDR_SURF_8_BANK));
1565 
1566 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1567 			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
1568 		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
1569 			if (reg_offset != 7)
1570 				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
1571 		break;
1572 	}
1573 }
1574 
1575 /**
1576  * gfx_v7_0_select_se_sh - select which SE, SH to address
1577  *
1578  * @adev: amdgpu_device pointer
1579  * @se_num: shader engine to address
 * @sh_num: sh block to address
 * @instance: GRBM instance to address (0xffffffff to broadcast to all instances)
1581  *
1582  * Select which SE, SH combinations to address. Certain
1583  * registers are instanced per SE or SH.  0xffffffff means
1584  * broadcast to all SEs or SHs (CIK).
1585  */
1586 static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1587 				  u32 se_num, u32 sh_num, u32 instance)
1588 {
1589 	u32 data;
1590 
1591 	if (instance == 0xffffffff)
1592 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1593 	else
1594 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1595 
1596 	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1597 		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1598 			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
1599 	else if (se_num == 0xffffffff)
1600 		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
1601 			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
1602 	else if (sh_num == 0xffffffff)
1603 		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
1604 			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1605 	else
1606 		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
1607 			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
1608 	WREG32(mmGRBM_GFX_INDEX, data);
1609 }
1610 
1611 /**
1612  * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
1613  *
1614  * @adev: amdgpu_device pointer
1615  *
1616  * Calculates the bitmask of enabled RBs (CIK).
1617  * Returns the enabled RB bitmask.
1618  */
1619 static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1620 {
1621 	u32 data, mask;
1622 
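	/* fused-off (CC) and user-disabled (GC_USER) RBs use the same
	 * BACKEND_DISABLE field layout, so OR them together, extract the
	 * field and invert it to get the bitmap of active RBs for this SH.
	 */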
1623 	data = RREG32(mmCC_RB_BACKEND_DISABLE);
1624 	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1625 
1626 	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1627 	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1628 
1629 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1630 					 adev->gfx.config.max_sh_per_se);
1631 
1632 	return (~data) & mask;
1633 }
1634 
1635 static void
1636 gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
1637 {
1638 	switch (adev->asic_type) {
1639 	case CHIP_BONAIRE:
1640 		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
1641 			  SE_XSEL(1) | SE_YSEL(1);
1642 		*rconf1 |= 0x0;
1643 		break;
1644 	case CHIP_HAWAII:
1645 		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
1646 			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
1647 			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
1648 			  SE_YSEL(3);
1649 		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
1650 			   SE_PAIR_YSEL(2);
1651 		break;
1652 	case CHIP_KAVERI:
1653 		*rconf |= RB_MAP_PKR0(2);
1654 		*rconf1 |= 0x0;
1655 		break;
1656 	case CHIP_KABINI:
1657 	case CHIP_MULLINS:
1658 		*rconf |= 0x0;
1659 		*rconf1 |= 0x0;
1660 		break;
1661 	default:
1662 		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
1663 		break;
1664 	}
1665 }
1666 
1667 static void
1668 gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
1669 					u32 raster_config, u32 raster_config_1,
1670 					unsigned rb_mask, unsigned num_rb)
1671 {
1672 	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
1673 	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
1674 	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
1675 	unsigned rb_per_se = num_rb / num_se;
1676 	unsigned se_mask[4];
1677 	unsigned se;
1678 
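	/* split the global RB mask into per-SE masks (up to 4 SEs) */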
1679 	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1680 	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1681 	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1682 	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1683 
1684 	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
1685 	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
1686 	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
1687 
1688 	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1689 			     (!se_mask[2] && !se_mask[3]))) {
1690 		raster_config_1 &= ~SE_PAIR_MAP_MASK;
1691 
1692 		if (!se_mask[0] && !se_mask[1]) {
1693 			raster_config_1 |=
1694 				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
1695 		} else {
1696 			raster_config_1 |=
1697 				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
1698 		}
1699 	}
1700 
1701 	for (se = 0; se < num_se; se++) {
1702 		unsigned raster_config_se = raster_config;
1703 		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1704 		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1705 		int idx = (se / 2) * 2;
1706 
1707 		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1708 			raster_config_se &= ~SE_MAP_MASK;
1709 
1710 			if (!se_mask[idx]) {
1711 				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
1712 			} else {
1713 				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
1714 			}
1715 		}
1716 
1717 		pkr0_mask &= rb_mask;
1718 		pkr1_mask &= rb_mask;
1719 		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1720 			raster_config_se &= ~PKR_MAP_MASK;
1721 
1722 			if (!pkr0_mask) {
1723 				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
1724 			} else {
1725 				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
1726 			}
1727 		}
1728 
1729 		if (rb_per_se >= 2) {
1730 			unsigned rb0_mask = 1 << (se * rb_per_se);
1731 			unsigned rb1_mask = rb0_mask << 1;
1732 
1733 			rb0_mask &= rb_mask;
1734 			rb1_mask &= rb_mask;
1735 			if (!rb0_mask || !rb1_mask) {
1736 				raster_config_se &= ~RB_MAP_PKR0_MASK;
1737 
1738 				if (!rb0_mask) {
1739 					raster_config_se |=
1740 						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
1741 				} else {
1742 					raster_config_se |=
1743 						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
1744 				}
1745 			}
1746 
1747 			if (rb_per_se > 2) {
1748 				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1749 				rb1_mask = rb0_mask << 1;
1750 				rb0_mask &= rb_mask;
1751 				rb1_mask &= rb_mask;
1752 				if (!rb0_mask || !rb1_mask) {
1753 					raster_config_se &= ~RB_MAP_PKR1_MASK;
1754 
1755 					if (!rb0_mask) {
1756 						raster_config_se |=
1757 							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
1758 					} else {
1759 						raster_config_se |=
1760 							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
1761 					}
1762 				}
1763 			}
1764 		}
1765 
1766 		/* GRBM_GFX_INDEX has a different offset on CI+ */
1767 		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
1768 		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
1769 		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1770 	}
1771 
1772 	/* GRBM_GFX_INDEX has a different offset on CI+ */
1773 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1774 }
1775 
1776 /**
1777  * gfx_v7_0_setup_rb - setup the RBs on the asic
1778  *
1779  * @adev: amdgpu_device pointer
1782  *
1783  * Configures per-SE/SH RB registers (CIK).
1784  */
1785 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1786 {
1787 	int i, j;
1788 	u32 data;
1789 	u32 raster_config = 0, raster_config_1 = 0;
1790 	u32 active_rbs = 0;
1791 	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1792 					adev->gfx.config.max_sh_per_se;
1793 	unsigned num_rb_pipes;
1794 
1795 	mutex_lock(&adev->grbm_idx_mutex);
1796 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1797 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1798 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1799 			data = gfx_v7_0_get_rb_active_bitmap(adev);
1800 			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1801 					       rb_bitmap_width_per_sh);
1802 		}
1803 	}
1804 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1805 
1806 	adev->gfx.config.backend_enable_mask = active_rbs;
1807 	adev->gfx.config.num_rbs = hweight32(active_rbs);
1808 
1809 	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
1810 			     adev->gfx.config.max_shader_engines, 16);
1811 
1812 	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
1813 
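	/* if nothing is enabled, or every possible RB is active, the default
	 * raster config can be used as is; otherwise each SE needs a config
	 * adjusted for its harvested RBs.
	 */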
1814 	if (!adev->gfx.config.backend_enable_mask ||
1815 			adev->gfx.config.num_rbs >= num_rb_pipes) {
1816 		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
1817 		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
1818 	} else {
1819 		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
1820 							adev->gfx.config.backend_enable_mask,
1821 							num_rb_pipes);
1822 	}
1823 
1824 	/* cache the values for userspace */
1825 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1826 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1827 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1828 			adev->gfx.config.rb_config[i][j].rb_backend_disable =
1829 				RREG32(mmCC_RB_BACKEND_DISABLE);
1830 			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
1831 				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1832 			adev->gfx.config.rb_config[i][j].raster_config =
1833 				RREG32(mmPA_SC_RASTER_CONFIG);
1834 			adev->gfx.config.rb_config[i][j].raster_config_1 =
1835 				RREG32(mmPA_SC_RASTER_CONFIG_1);
1836 		}
1837 	}
1838 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1839 	mutex_unlock(&adev->grbm_idx_mutex);
1840 }
1841 
1842 /**
 * gfx_v7_0_init_compute_vmid - init the compute VMIDs
1844  *
1845  * @adev: amdgpu_device pointer
1846  *
 * Initialize the SH_MEM registers for the compute VMIDs (8-15).
1849  */
1850 #define DEFAULT_SH_MEM_BASES	(0x6000)
1851 #define FIRST_COMPUTE_VMID	(8)
1852 #define LAST_COMPUTE_VMID	(16)
1853 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
1854 {
1855 	int i;
1856 	uint32_t sh_mem_config;
1857 	uint32_t sh_mem_bases;
1858 
1859 	/*
1860 	 * Configure apertures:
1861 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1862 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1863 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1864 	*/
1865 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1866 	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1867 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1868 	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
1869 	mutex_lock(&adev->srbm_mutex);
1870 	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1871 		cik_srbm_select(adev, 0, 0, 0, i);
1872 		/* CP and shaders */
1873 		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
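		/* APE1 is not used by the compute VMIDs; base (1) above
		 * limit (0) is assumed to leave the aperture disabled.
		 */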
1874 		WREG32(mmSH_MEM_APE1_BASE, 1);
1875 		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1876 		WREG32(mmSH_MEM_BASES, sh_mem_bases);
1877 	}
1878 	cik_srbm_select(adev, 0, 0, 0, 0);
1879 	mutex_unlock(&adev->srbm_mutex);
1880 }
1881 
1882 static void gfx_v7_0_config_init(struct amdgpu_device *adev)
1883 {
1884 	adev->gfx.config.double_offchip_lds_buf = 1;
1885 }
1886 
1887 /**
1888  * gfx_v7_0_constants_init - setup the 3D engine
1889  *
1890  * @adev: amdgpu_device pointer
1891  *
1892  * init the gfx constants such as the 3D engine, tiling configuration
1893  * registers, maximum number of quad pipes, render backends...
1894  */
1895 static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
1896 {
1897 	u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
1898 	u32 tmp;
1899 	int i;
1900 
1901 	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
1902 
1903 	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1904 	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
1905 	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
1906 
1907 	gfx_v7_0_tiling_mode_table_init(adev);
1908 
1909 	gfx_v7_0_setup_rb(adev);
1910 	gfx_v7_0_get_cu_info(adev);
1911 	gfx_v7_0_config_init(adev);
1912 
1913 	/* set HW defaults for 3D engine */
1914 	WREG32(mmCP_MEQ_THRESHOLDS,
1915 	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
1916 	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));
1917 
1918 	mutex_lock(&adev->grbm_idx_mutex);
1919 	/*
	 * make sure that the following register writes are broadcast
	 * to all the shaders
1922 	 */
1923 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1924 
1925 	/* XXX SH_MEM regs */
1926 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1927 	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1928 				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1929 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
1930 				   MTYPE_NC);
1931 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
1932 				   MTYPE_UC);
1933 	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);
1934 
1935 	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
1936 				   SWIZZLE_ENABLE, 1);
1937 	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1938 				   ELEMENT_SIZE, 1);
1939 	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
1940 				   INDEX_STRIDE, 3);
1941 	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
1942 
1943 	mutex_lock(&adev->srbm_mutex);
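	/* VMID 0 is used by the kernel with physical GPU addresses, so its
	 * SH_MEM base stays 0; the other VMIDs get the shared aperture base.
	 */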
1944 	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
1945 		if (i == 0)
1946 			sh_mem_base = 0;
1947 		else
1948 			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
1949 		cik_srbm_select(adev, 0, 0, 0, i);
1950 		/* CP and shaders */
1951 		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
1952 		WREG32(mmSH_MEM_APE1_BASE, 1);
1953 		WREG32(mmSH_MEM_APE1_LIMIT, 0);
1954 		WREG32(mmSH_MEM_BASES, sh_mem_base);
1955 	}
1956 	cik_srbm_select(adev, 0, 0, 0, 0);
1957 	mutex_unlock(&adev->srbm_mutex);
1958 
1959 	gfx_v7_0_init_compute_vmid(adev);
1960 
1961 	WREG32(mmSX_DEBUG_1, 0x20);
1962 
1963 	WREG32(mmTA_CNTL_AUX, 0x00010000);
1964 
1965 	tmp = RREG32(mmSPI_CONFIG_CNTL);
1966 	tmp |= 0x03000000;
1967 	WREG32(mmSPI_CONFIG_CNTL, tmp);
1968 
1969 	WREG32(mmSQ_CONFIG, 1);
1970 
1971 	WREG32(mmDB_DEBUG, 0);
1972 
1973 	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
1974 	tmp |= 0x00000400;
1975 	WREG32(mmDB_DEBUG2, tmp);
1976 
1977 	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
1978 	tmp |= 0x00020200;
1979 	WREG32(mmDB_DEBUG3, tmp);
1980 
1981 	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
1982 	tmp |= 0x00018208;
1983 	WREG32(mmCB_HW_CONTROL, tmp);
1984 
1985 	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));
1986 
1987 	WREG32(mmPA_SC_FIFO_SIZE,
1988 		((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1989 		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1990 		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1991 		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));
1992 
1993 	WREG32(mmVGT_NUM_INSTANCES, 1);
1994 
1995 	WREG32(mmCP_PERFMON_CNTL, 0);
1996 
1997 	WREG32(mmSQ_CONFIG, 0);
1998 
1999 	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
2000 		((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
2001 		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));
2002 
2003 	WREG32(mmVGT_CACHE_INVALIDATION,
2004 		(VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
2005 		(ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));
2006 
2007 	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
2008 	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);
2009 
2010 	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
2011 			(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
2012 	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
2013 
2014 	tmp = RREG32(mmSPI_ARB_PRIORITY);
2015 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
2016 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
2017 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
2018 	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
2019 	WREG32(mmSPI_ARB_PRIORITY, tmp);
2020 
2021 	mutex_unlock(&adev->grbm_idx_mutex);
2022 
2023 	udelay(50);
2024 }
2025 
2026 /*
2027  * GPU scratch registers helpers function.
2028  */
2029 /**
2030  * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
2031  *
2032  * @adev: amdgpu_device pointer
2033  *
2034  * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
2036  * is not used by default on newer asics (r6xx+).  On newer asics,
2037  * memory buffers are used for fences rather than scratch regs.
2038  */
2039 static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
2040 {
2041 	adev->gfx.scratch.num_reg = 8;
2042 	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
2043 	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
2044 }
2045 
2046 /**
2047  * gfx_v7_0_ring_test_ring - basic gfx ring test
2048  *
 * @ring: amdgpu_ring structure holding ring information
2051  *
2052  * Allocate a scratch register and write to it using the gfx ring (CIK).
2053  * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume().
2055  * Returns 0 on success, error on failure.
2056  */
2057 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
2058 {
2059 	struct amdgpu_device *adev = ring->adev;
2060 	uint32_t scratch;
2061 	uint32_t tmp = 0;
2062 	unsigned i;
2063 	int r;
2064 
2065 	r = amdgpu_gfx_scratch_get(adev, &scratch);
2066 	if (r)
2067 		return r;
2068 
2069 	WREG32(scratch, 0xCAFEDEAD);
2070 	r = amdgpu_ring_alloc(ring, 3);
2071 	if (r)
2072 		goto error_free_scratch;
2073 
2074 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2075 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2076 	amdgpu_ring_write(ring, 0xDEADBEEF);
2077 	amdgpu_ring_commit(ring);
2078 
2079 	for (i = 0; i < adev->usec_timeout; i++) {
2080 		tmp = RREG32(scratch);
2081 		if (tmp == 0xDEADBEEF)
2082 			break;
2083 		DRM_UDELAY(1);
2084 	}
2085 	if (i >= adev->usec_timeout)
2086 		r = -ETIMEDOUT;
2087 
2088 error_free_scratch:
2089 	amdgpu_gfx_scratch_free(adev, scratch);
2090 	return r;
2091 }
2092 
2093 /**
 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
 *
 * @ring: amdgpu_ring structure holding ring information
2098  *
2099  * Emits an hdp flush on the cp.
2100  */
2101 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2102 {
2103 	u32 ref_and_mask;
2104 	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
2105 
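	/* compute rings use the per-pipe CP2..CP9 bits of GPU_HDP_FLUSH_DONE,
	 * the gfx ring uses CP0.
	 */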
2106 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2107 		switch (ring->me) {
2108 		case 1:
2109 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
2110 			break;
2111 		case 2:
2112 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
2113 			break;
2114 		default:
2115 			return;
2116 		}
2117 	} else {
2118 		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
2119 	}
2120 
2121 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2122 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
2123 				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
2124 				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
2125 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
2126 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
2127 	amdgpu_ring_write(ring, ref_and_mask);
2128 	amdgpu_ring_write(ring, ref_and_mask);
2129 	amdgpu_ring_write(ring, 0x20); /* poll interval */
2130 }
2131 
2132 static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
2133 {
2134 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2135 	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
2136 		EVENT_INDEX(4));
2137 
2138 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2139 	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
2140 		EVENT_INDEX(0));
2141 }
2142 
2143 /**
2144  * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
2145  *
 * @ring: amdgpu_ring structure holding ring information
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to emit
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Emits a fence sequence number on the gfx ring and flushes
2150  * GPU caches.
2151  */
2152 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
2153 					 u64 seq, unsigned flags)
2154 {
2155 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2156 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2157 	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with a sequence number one below the real one.
2159 	 */
2160 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2161 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2162 				 EOP_TC_ACTION_EN |
2163 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2164 				 EVENT_INDEX(5)));
2165 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2166 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2167 				DATA_SEL(1) | INT_SEL(0));
2168 	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
2169 	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
2170 
2171 	/* Then send the real EOP event down the pipe. */
2172 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2173 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2174 				 EOP_TC_ACTION_EN |
2175 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2176 				 EVENT_INDEX(5)));
2177 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2178 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
2179 				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2180 	amdgpu_ring_write(ring, lower_32_bits(seq));
2181 	amdgpu_ring_write(ring, upper_32_bits(seq));
2182 }
2183 
2184 /**
2185  * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
 * @ring: amdgpu_ring structure holding ring information
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number to emit
 * @flags: fence flags (AMDGPU_FENCE_FLAG_*)
 *
 * Emits a fence sequence number on the compute ring and flushes
2190  * Emits a fence sequnce number on the compute ring and flushes
2191  * GPU caches.
2192  */
2193 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
2194 					     u64 addr, u64 seq,
2195 					     unsigned flags)
2196 {
2197 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2198 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2199 
2200 	/* RELEASE_MEM - flush caches, send int */
2201 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
2202 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
2203 				 EOP_TC_ACTION_EN |
2204 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2205 				 EVENT_INDEX(5)));
2206 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2207 	amdgpu_ring_write(ring, addr & 0xfffffffc);
2208 	amdgpu_ring_write(ring, upper_32_bits(addr));
2209 	amdgpu_ring_write(ring, lower_32_bits(seq));
2210 	amdgpu_ring_write(ring, upper_32_bits(seq));
2211 }
2212 
2213 /*
2214  * IB stuff
2215  */
2216 /**
 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the gfx ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @job: job to retrieve the VMID from
 * @ib: amdgpu indirect buffer object
 * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring.  IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring.  This function schedules the IB
 * on the gfx ring for execution by the GPU.
2227  */
2228 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2229 					struct amdgpu_job *job,
2230 					struct amdgpu_ib *ib,
2231 					uint32_t flags)
2232 {
2233 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2234 	u32 header, control = 0;
2235 
2236 	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
2237 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2238 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2239 		amdgpu_ring_write(ring, 0);
2240 	}
2241 
2242 	if (ib->flags & AMDGPU_IB_FLAG_CE)
2243 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
2244 	else
2245 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2246 
2247 	control |= ib->length_dw | (vmid << 24);
2248 
2249 	amdgpu_ring_write(ring, header);
2250 	amdgpu_ring_write(ring,
2251 #ifdef __BIG_ENDIAN
2252 			  (2 << 0) |
2253 #endif
2254 			  (ib->gpu_addr & 0xFFFFFFFC));
2255 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2256 	amdgpu_ring_write(ring, control);
2257 }
2258 
2259 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2260 					  struct amdgpu_job *job,
2261 					  struct amdgpu_ib *ib,
2262 					  uint32_t flags)
2263 {
2264 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2265 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2266 
2267 	/* Currently, there is a high possibility to get wave ID mismatch
2268 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2269 	 * different wave IDs than the GDS expects. This situation happens
2270 	 * randomly when at least 5 compute pipes use GDS ordered append.
2271 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2272 	 * Those are probably bugs somewhere else in the kernel driver.
2273 	 *
2274 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2275 	 * GDS to 0 for this ring (me/pipe).
2276 	 */
2277 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2278 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2279 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
2280 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2281 	}
2282 
2283 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2284 	amdgpu_ring_write(ring,
2285 #ifdef __BIG_ENDIAN
2286 					  (2 << 0) |
2287 #endif
2288 					  (ib->gpu_addr & 0xFFFFFFFC));
2289 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2290 	amdgpu_ring_write(ring, control);
2291 }
2292 
2293 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
2294 {
2295 	uint32_t dw2 = 0;
2296 
	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
2298 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
2299 		gfx_v7_0_ring_emit_vgt_flush(ring);
2300 		/* set load_global_config & load_global_uconfig */
2301 		dw2 |= 0x8001;
2302 		/* set load_cs_sh_regs */
2303 		dw2 |= 0x01000000;
2304 		/* set load_per_context_state & load_gfx_sh_regs */
2305 		dw2 |= 0x10002;
2306 	}
2307 
2308 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2309 	amdgpu_ring_write(ring, dw2);
2310 	amdgpu_ring_write(ring, 0);
2311 }
2312 
2313 /**
2314  * gfx_v7_0_ring_test_ib - basic ring IB test
2315  *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: how long to wait for the IB fence, in jiffies
2317  *
2318  * Allocate an IB and execute it on the gfx ring (CIK).
2319  * Provides a basic gfx ring test to verify that IBs are working.
2320  * Returns 0 on success, error on failure.
2321  */
2322 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2323 {
2324 	struct amdgpu_device *adev = ring->adev;
2325 	struct amdgpu_ib ib;
2326 	struct dma_fence *f = NULL;
2327 	uint32_t scratch;
2328 	uint32_t tmp = 0;
2329 	long r;
2330 
2331 	r = amdgpu_gfx_scratch_get(adev, &scratch);
2332 	if (r)
2333 		return r;
2334 
2335 	WREG32(scratch, 0xCAFEDEAD);
2336 	memset(&ib, 0, sizeof(ib));
2337 	r = amdgpu_ib_get(adev, NULL, 256, &ib);
2338 	if (r)
2339 		goto err1;
2340 
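	/* build a tiny IB that writes 0xDEADBEEF to the scratch register */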
2341 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
2342 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
2343 	ib.ptr[2] = 0xDEADBEEF;
2344 	ib.length_dw = 3;
2345 
2346 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
2347 	if (r)
2348 		goto err2;
2349 
2350 	r = dma_fence_wait_timeout(f, false, timeout);
2351 	if (r == 0) {
2352 		r = -ETIMEDOUT;
2353 		goto err2;
2354 	} else if (r < 0) {
2355 		goto err2;
2356 	}
2357 	tmp = RREG32(scratch);
2358 	if (tmp == 0xDEADBEEF)
2359 		r = 0;
2360 	else
2361 		r = -EINVAL;
2362 
2363 err2:
2364 	amdgpu_ib_free(adev, &ib, NULL);
2365 	dma_fence_put(f);
2366 err1:
2367 	amdgpu_gfx_scratch_free(adev, scratch);
2368 	return r;
2369 }
2370 
2371 /*
2372  * CP.
 * On CIK, gfx and compute now have independent command processors.
2374  *
2375  * GFX
2376  * Gfx consists of a single ring and can process both gfx jobs and
2377  * compute jobs.  The gfx CP consists of three microengines (ME):
2378  * PFP - Pre-Fetch Parser
2379  * ME - Micro Engine
2380  * CE - Constant Engine
2381  * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
2383  * used by the DE so that they can be loaded into cache in parallel
2384  * while the DE is processing state update packets.
2385  *
2386  * Compute
2387  * The compute CP consists of two microengines (ME):
2388  * MEC1 - Compute MicroEngine 1
2389  * MEC2 - Compute MicroEngine 2
2390  * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2391  * The queues are exposed to userspace and are programmed directly
2392  * by the compute runtime.
2393  */
2394 /**
2395  * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
2396  *
2397  * @adev: amdgpu_device pointer
2398  * @enable: enable or disable the MEs
2399  *
2400  * Halts or unhalts the gfx MEs.
2401  */
2402 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2403 {
2404 	int i;
2405 
2406 	if (enable) {
2407 		WREG32(mmCP_ME_CNTL, 0);
2408 	} else {
2409 		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
2410 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2411 			adev->gfx.gfx_ring[i].sched.ready = false;
2412 	}
2413 	udelay(50);
2414 }
2415 
2416 /**
2417  * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
2418  *
2419  * @adev: amdgpu_device pointer
2420  *
2421  * Loads the gfx PFP, ME, and CE ucode.
2422  * Returns 0 for success, -EINVAL if the ucode is not available.
2423  */
2424 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2425 {
2426 	const struct gfx_firmware_header_v1_0 *pfp_hdr;
2427 	const struct gfx_firmware_header_v1_0 *ce_hdr;
2428 	const struct gfx_firmware_header_v1_0 *me_hdr;
2429 	const __le32 *fw_data;
2430 	unsigned i, fw_size;
2431 
2432 	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2433 		return -EINVAL;
2434 
2435 	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
2436 	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
2437 	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
2438 
2439 	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2440 	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2441 	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2442 	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2443 	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2444 	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2445 	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2446 	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2447 	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2448 
2449 	gfx_v7_0_cp_gfx_enable(adev, false);
2450 
2451 	/* PFP */
2452 	fw_data = (const __le32 *)
2453 		(adev->gfx.pfp_fw->data +
2454 		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2455 	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2456 	WREG32(mmCP_PFP_UCODE_ADDR, 0);
2457 	for (i = 0; i < fw_size; i++)
2458 		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2459 	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2460 
2461 	/* CE */
2462 	fw_data = (const __le32 *)
2463 		(adev->gfx.ce_fw->data +
2464 		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2465 	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2466 	WREG32(mmCP_CE_UCODE_ADDR, 0);
2467 	for (i = 0; i < fw_size; i++)
2468 		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2469 	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2470 
2471 	/* ME */
2472 	fw_data = (const __le32 *)
2473 		(adev->gfx.me_fw->data +
2474 		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2475 	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2476 	WREG32(mmCP_ME_RAM_WADDR, 0);
2477 	for (i = 0; i < fw_size; i++)
2478 		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2479 	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2480 
2481 	return 0;
2482 }
2483 
2484 /**
2485  * gfx_v7_0_cp_gfx_start - start the gfx ring
2486  *
2487  * @adev: amdgpu_device pointer
2488  *
2489  * Enables the ring and loads the clear state context and other
2490  * packets required to init the ring.
2491  * Returns 0 for success, error for failure.
2492  */
2493 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
2494 {
2495 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2496 	const struct cs_section_def *sect = NULL;
2497 	const struct cs_extent_def *ext = NULL;
2498 	int r, i;
2499 
2500 	/* init the CP */
2501 	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2502 	WREG32(mmCP_ENDIAN_SWAP, 0);
2503 	WREG32(mmCP_DEVICE_ID, 1);
2504 
2505 	gfx_v7_0_cp_gfx_enable(adev, true);
2506 
2507 	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
2508 	if (r) {
2509 		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2510 		return r;
2511 	}
2512 
2513 	/* init the CE partitions.  CE only used for gfx on CIK */
2514 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2515 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2516 	amdgpu_ring_write(ring, 0x8000);
2517 	amdgpu_ring_write(ring, 0x8000);
2518 
2519 	/* clear state buffer */
2520 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2521 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2522 
2523 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2524 	amdgpu_ring_write(ring, 0x80000000);
2525 	amdgpu_ring_write(ring, 0x80000000);
2526 
2527 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
2528 		for (ext = sect->section; ext->extent != NULL; ++ext) {
2529 			if (sect->id == SECT_CONTEXT) {
2530 				amdgpu_ring_write(ring,
2531 						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
2532 				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2533 				for (i = 0; i < ext->reg_count; i++)
2534 					amdgpu_ring_write(ring, ext->extent[i]);
2535 			}
2536 		}
2537 	}
2538 
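	/* program the raster config cached for SE0/SH0 by gfx_v7_0_setup_rb() */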
2539 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2540 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
2541 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
2542 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
2543 
2544 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2545 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2546 
2547 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2548 	amdgpu_ring_write(ring, 0);
2549 
2550 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
2551 	amdgpu_ring_write(ring, 0x00000316);
2552 	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2553 	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
2554 
2555 	amdgpu_ring_commit(ring);
2556 
2557 	return 0;
2558 }
2559 
2560 /**
2561  * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
2562  *
2563  * @adev: amdgpu_device pointer
2564  *
2565  * Program the location and size of the gfx ring buffer
2566  * and test it to make sure it's working.
2567  * Returns 0 for success, error for failure.
2568  */
2569 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
2570 {
2571 	struct amdgpu_ring *ring;
2572 	u32 tmp;
2573 	u32 rb_bufsz;
2574 	u64 rb_addr, rptr_addr;
2575 	int r;
2576 
2577 	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
2578 	if (adev->asic_type != CHIP_HAWAII)
2579 		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
2580 
2581 	/* Set the write pointer delay */
2582 	WREG32(mmCP_RB_WPTR_DELAY, 0);
2583 
2584 	/* set the RB to use vmid 0 */
2585 	WREG32(mmCP_RB_VMID, 0);
2586 
2587 	WREG32(mmSCRATCH_ADDR, 0);
2588 
2589 	/* ring 0 - compute and gfx */
2590 	/* Set ring buffer size */
2591 	ring = &adev->gfx.gfx_ring[0];
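	/* CP_RB0_CNTL: RB_BUFSZ = log2(ring size in qwords),
	 * RB_BLKSZ (bits 8+) = log2(GPU page size in qwords).
	 */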
2592 	rb_bufsz = order_base_2(ring->ring_size / 8);
2593 	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2594 #ifdef __BIG_ENDIAN
2595 	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
2596 #endif
2597 	WREG32(mmCP_RB0_CNTL, tmp);
2598 
2599 	/* Initialize the ring buffer's read and write pointers */
2600 	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
2601 	ring->wptr = 0;
2602 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2603 
	/* set the wb address whether it's enabled or not */
2605 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2606 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2607 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
2608 
2609 	/* scratch register shadowing is no longer supported */
2610 	WREG32(mmSCRATCH_UMSK, 0);
2611 
2612 	mdelay(1);
2613 	WREG32(mmCP_RB0_CNTL, tmp);
2614 
2615 	rb_addr = ring->gpu_addr >> 8;
2616 	WREG32(mmCP_RB0_BASE, rb_addr);
2617 	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2618 
2619 	/* start the ring */
2620 	gfx_v7_0_cp_gfx_start(adev);
2621 	r = amdgpu_ring_test_helper(ring);
2622 	if (r)
2623 		return r;
2624 
2625 	return 0;
2626 }
2627 
2628 static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
2629 {
2630 	return ring->adev->wb.wb[ring->rptr_offs];
2631 }
2632 
2633 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
2634 {
2635 	struct amdgpu_device *adev = ring->adev;
2636 
2637 	return RREG32(mmCP_RB0_WPTR);
2638 }
2639 
2640 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
2641 {
2642 	struct amdgpu_device *adev = ring->adev;
2643 
2644 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2645 	(void)RREG32(mmCP_RB0_WPTR);
2646 }
2647 
2648 static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
2649 {
2650 	/* XXX check if swapping is necessary on BE */
2651 	return ring->adev->wb.wb[ring->wptr_offs];
2652 }
2653 
2654 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
2655 {
2656 	struct amdgpu_device *adev = ring->adev;
2657 
2658 	/* XXX check if swapping is necessary on BE */
2659 	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
2660 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2661 }
2662 
2663 /**
2664  * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
2665  *
2666  * @adev: amdgpu_device pointer
2667  * @enable: enable or disable the MEs
2668  *
2669  * Halts or unhalts the compute MEs.
2670  */
2671 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2672 {
2673 	int i;
2674 
2675 	if (enable) {
2676 		WREG32(mmCP_MEC_CNTL, 0);
2677 	} else {
2678 		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2679 		for (i = 0; i < adev->gfx.num_compute_rings; i++)
2680 			adev->gfx.compute_ring[i].sched.ready = false;
2681 	}
2682 	udelay(50);
2683 }
2684 
2685 /**
2686  * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
2687  *
2688  * @adev: amdgpu_device pointer
2689  *
2690  * Loads the compute MEC1&2 ucode.
2691  * Returns 0 for success, -EINVAL if the ucode is not available.
2692  */
2693 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2694 {
2695 	const struct gfx_firmware_header_v1_0 *mec_hdr;
2696 	const __le32 *fw_data;
2697 	unsigned i, fw_size;
2698 
2699 	if (!adev->gfx.mec_fw)
2700 		return -EINVAL;
2701 
2702 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2703 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2704 	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2705 	adev->gfx.mec_feature_version = le32_to_cpu(
2706 					mec_hdr->ucode_feature_version);
2707 
2708 	gfx_v7_0_cp_compute_enable(adev, false);
2709 
2710 	/* MEC1 */
2711 	fw_data = (const __le32 *)
2712 		(adev->gfx.mec_fw->data +
2713 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2714 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
2715 	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2716 	for (i = 0; i < fw_size; i++)
2717 		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
2718 	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
2719 
2720 	if (adev->asic_type == CHIP_KAVERI) {
2721 		const struct gfx_firmware_header_v1_0 *mec2_hdr;
2722 
2723 		if (!adev->gfx.mec2_fw)
2724 			return -EINVAL;
2725 
2726 		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2727 		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2728 		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2729 		adev->gfx.mec2_feature_version = le32_to_cpu(
2730 				mec2_hdr->ucode_feature_version);
2731 
2732 		/* MEC2 */
2733 		fw_data = (const __le32 *)
2734 			(adev->gfx.mec2_fw->data +
2735 			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
2736 		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
2737 		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2738 		for (i = 0; i < fw_size; i++)
2739 			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
2740 		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
2741 	}
2742 
2743 	return 0;
2744 }
2745 
2746 /**
2747  * gfx_v7_0_cp_compute_fini - stop the compute queues
2748  *
2749  * @adev: amdgpu_device pointer
2750  *
2751  * Stop the compute queues and tear down the driver queue
2752  * info.
2753  */
2754 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
2755 {
2756 	int i;
2757 
2758 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2759 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2760 
2761 		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
2762 	}
2763 }
2764 
2765 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
2766 {
2767 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
2768 }
2769 
2770 static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
2771 {
2772 	int r;
2773 	u32 *hpd;
2774 	size_t mec_hpd_size;
2775 
2776 	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
2777 
2778 	/* take ownership of the relevant compute queues */
2779 	amdgpu_gfx_compute_queue_acquire(adev);
2780 
2781 	/* allocate space for ALL pipes (even the ones we don't own) */
2782 	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
2783 		* GFX7_MEC_HPD_SIZE * 2;
2784 
2785 	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
2786 				      AMDGPU_GEM_DOMAIN_VRAM,
2787 				      &adev->gfx.mec.hpd_eop_obj,
2788 				      &adev->gfx.mec.hpd_eop_gpu_addr,
2789 				      (void **)&hpd);
2790 	if (r) {
		dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r);
2792 		gfx_v7_0_mec_fini(adev);
2793 		return r;
2794 	}
2795 
2796 	/* clear memory.  Not sure if this is required or not */
2797 	memset(hpd, 0, mec_hpd_size);
2798 
2799 	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
2800 	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
2801 
2802 	return 0;
2803 }
2804 
2805 struct hqd_registers
2806 {
2807 	u32 cp_mqd_base_addr;
2808 	u32 cp_mqd_base_addr_hi;
2809 	u32 cp_hqd_active;
2810 	u32 cp_hqd_vmid;
2811 	u32 cp_hqd_persistent_state;
2812 	u32 cp_hqd_pipe_priority;
2813 	u32 cp_hqd_queue_priority;
2814 	u32 cp_hqd_quantum;
2815 	u32 cp_hqd_pq_base;
2816 	u32 cp_hqd_pq_base_hi;
2817 	u32 cp_hqd_pq_rptr;
2818 	u32 cp_hqd_pq_rptr_report_addr;
2819 	u32 cp_hqd_pq_rptr_report_addr_hi;
2820 	u32 cp_hqd_pq_wptr_poll_addr;
2821 	u32 cp_hqd_pq_wptr_poll_addr_hi;
2822 	u32 cp_hqd_pq_doorbell_control;
2823 	u32 cp_hqd_pq_wptr;
2824 	u32 cp_hqd_pq_control;
2825 	u32 cp_hqd_ib_base_addr;
2826 	u32 cp_hqd_ib_base_addr_hi;
2827 	u32 cp_hqd_ib_rptr;
2828 	u32 cp_hqd_ib_control;
2829 	u32 cp_hqd_iq_timer;
2830 	u32 cp_hqd_iq_rptr;
2831 	u32 cp_hqd_dequeue_request;
2832 	u32 cp_hqd_dma_offload;
2833 	u32 cp_hqd_sema_cmd;
2834 	u32 cp_hqd_msg_type;
2835 	u32 cp_hqd_atomic0_preop_lo;
2836 	u32 cp_hqd_atomic0_preop_hi;
2837 	u32 cp_hqd_atomic1_preop_lo;
2838 	u32 cp_hqd_atomic1_preop_hi;
2839 	u32 cp_hqd_hq_scheduler0;
2840 	u32 cp_hqd_hq_scheduler1;
2841 	u32 cp_mqd_control;
2842 };
2843 
2844 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
2845 				       int mec, int pipe)
2846 {
2847 	u64 eop_gpu_addr;
2848 	u32 tmp;
2849 	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
2850 			    * GFX7_MEC_HPD_SIZE * 2;
2851 
2852 	mutex_lock(&adev->srbm_mutex);
2853 	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;
2854 
2855 	cik_srbm_select(adev, mec + 1, pipe, 0, 0);
2856 
2857 	/* write the EOP addr */
2858 	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
2859 	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
2860 
2861 	/* set the VMID assigned */
2862 	WREG32(mmCP_HPD_EOP_VMID, 0);
2863 
2864 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2865 	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
2866 	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
2867 	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
2868 	WREG32(mmCP_HPD_EOP_CONTROL, tmp);
2869 
2870 	cik_srbm_select(adev, 0, 0, 0, 0);
2871 	mutex_unlock(&adev->srbm_mutex);
2872 }
2873 
2874 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
2875 {
2876 	int i;
2877 
2878 	/* disable the queue if it's active */
2879 	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
2880 		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
2881 		for (i = 0; i < adev->usec_timeout; i++) {
2882 			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
2883 				break;
2884 			udelay(1);
2885 		}
2886 
2887 		if (i == adev->usec_timeout)
2888 			return -ETIMEDOUT;
2889 
2890 		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
2891 		WREG32(mmCP_HQD_PQ_RPTR, 0);
2892 		WREG32(mmCP_HQD_PQ_WPTR, 0);
2893 	}
2894 
2895 	return 0;
2896 }
2897 
2898 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
2899 			     struct cik_mqd *mqd,
2900 			     uint64_t mqd_gpu_addr,
2901 			     struct amdgpu_ring *ring)
2902 {
2903 	u64 hqd_gpu_addr;
2904 	u64 wb_gpu_addr;
2905 
2906 	/* init the mqd struct */
2907 	memset(mqd, 0, sizeof(struct cik_mqd));
2908 
2909 	mqd->header = 0xC0310800;
2910 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2911 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2912 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2913 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2914 
2915 	/* enable doorbell? */
2916 	mqd->cp_hqd_pq_doorbell_control =
2917 		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2918 	if (ring->use_doorbell)
2919 		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2920 	else
2921 		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2922 
2923 	/* set the pointer to the MQD */
2924 	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
2925 	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
2926 
2927 	/* set MQD vmid to 0 */
2928 	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
2929 	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;
2930 
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2932 	hqd_gpu_addr = ring->gpu_addr >> 8;
2933 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2934 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2935 
2936 	/* set up the HQD, this is similar to CP_RB0_CNTL */
2937 	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
2938 	mqd->cp_hqd_pq_control &=
2939 		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
2940 				CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);
2941 
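	/* QUEUE_SIZE = log2(ring size in qwords), RPTR_BLOCK_SIZE in bits 8+,
	 * mirroring CP_RB0_CNTL for the gfx ring
	 */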
2942 	mqd->cp_hqd_pq_control |=
2943 		order_base_2(ring->ring_size / 8);
2944 	mqd->cp_hqd_pq_control |=
2945 		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
2946 #ifdef __BIG_ENDIAN
2947 	mqd->cp_hqd_pq_control |=
2948 		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
2949 #endif
2950 	mqd->cp_hqd_pq_control &=
2951 		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
2952 				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
2953 				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
2954 	mqd->cp_hqd_pq_control |=
2955 		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
2956 		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */
2957 
	/* only used if CP_PQ_WPTR_POLL_CNTL.EN = 1 */
2959 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2960 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2961 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2962 
	/* set the wb address whether it's enabled or not */
2964 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2965 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2966 	mqd->cp_hqd_pq_rptr_report_addr_hi =
2967 		upper_32_bits(wb_gpu_addr) & 0xffff;
2968 
2969 	/* enable the doorbell if requested */
2970 	if (ring->use_doorbell) {
2971 		mqd->cp_hqd_pq_doorbell_control =
2972 			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
2973 		mqd->cp_hqd_pq_doorbell_control &=
2974 			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
2975 		mqd->cp_hqd_pq_doorbell_control |=
2976 			(ring->doorbell_index <<
2977 			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
2978 		mqd->cp_hqd_pq_doorbell_control |=
2979 			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
2980 		mqd->cp_hqd_pq_doorbell_control &=
2981 			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
2982 					CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
2983 
2984 	} else {
2985 		mqd->cp_hqd_pq_doorbell_control = 0;
2986 	}
2987 
2988 	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2989 	ring->wptr = 0;
2990 	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
2991 	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
2992 
2993 	/* set the vmid for the queue */
2994 	mqd->cp_hqd_vmid = 0;
2995 
2996 	/* defaults */
2997 	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
2998 	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
2999 	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
3000 	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
3001 	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
3002 	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
3003 	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
3004 	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
3005 	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
3006 	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
3007 	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
3008 	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3009 	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3010 	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
3011 	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
3012 	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);
3013 
3014 	/* activate the queue */
3015 	mqd->cp_hqd_active = 1;
3016 }
3017 
3018 int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
3019 {
3020 	uint32_t tmp;
3021 	uint32_t mqd_reg;
3022 	uint32_t *mqd_data;
3023 
3024 	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
3025 	mqd_data = &mqd->cp_mqd_base_addr_lo;
3026 
3027 	/* disable wptr polling */
3028 	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
3029 	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3030 	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
3031 
3032 	/* program all HQD registers */
3033 	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
3034 		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3035 
3036 	/* activate the HQD */
3037 	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
3038 		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
3039 
3040 	return 0;
3041 }
3042 
3043 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
3044 {
3045 	int r;
3046 	u64 mqd_gpu_addr;
3047 	struct cik_mqd *mqd;
3048 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
3049 
3050 	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
3051 				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
3052 				      &mqd_gpu_addr, (void **)&mqd);
3053 	if (r) {
3054 		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3055 		return r;
3056 	}
3057 
3058 	mutex_lock(&adev->srbm_mutex);
3059 	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3060 
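	/* init the MQD in memory, make sure the HQD is idle, then program it */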
3061 	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
3062 	gfx_v7_0_mqd_deactivate(adev);
3063 	gfx_v7_0_mqd_commit(adev, mqd);
3064 
3065 	cik_srbm_select(adev, 0, 0, 0, 0);
3066 	mutex_unlock(&adev->srbm_mutex);
3067 
3068 	amdgpu_bo_kunmap(ring->mqd_obj);
3069 	amdgpu_bo_unreserve(ring->mqd_obj);
3070 	return 0;
3071 }
3072 
3073 /**
3074  * gfx_v7_0_cp_compute_resume - setup the compute queue registers
3075  *
3076  * @adev: amdgpu_device pointer
3077  *
3078  * Program the compute queues and test them to make sure they
3079  * are working.
3080  * Returns 0 for success, error for failure.
3081  */
3082 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3083 {
3084 	int r, i, j;
3085 	u32 tmp;
3086 	struct amdgpu_ring *ring;
3087 
3088 	/* fix up chicken bits */
3089 	tmp = RREG32(mmCP_CPF_DEBUG);
3090 	tmp |= (1 << 23);
3091 	WREG32(mmCP_CPF_DEBUG, tmp);
3092 
3093 	/* init all pipes (even the ones we don't own) */
3094 	for (i = 0; i < adev->gfx.mec.num_mec; i++)
3095 		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
3096 			gfx_v7_0_compute_pipe_init(adev, i, j);
3097 
3098 	/* init the queues */
3099 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3100 		r = gfx_v7_0_compute_queue_init(adev, i);
3101 		if (r) {
3102 			gfx_v7_0_cp_compute_fini(adev);
3103 			return r;
3104 		}
3105 	}
3106 
3107 	gfx_v7_0_cp_compute_enable(adev, true);
3108 
3109 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3110 		ring = &adev->gfx.compute_ring[i];
3111 		amdgpu_ring_test_helper(ring);
3112 	}
3113 
3114 	return 0;
3115 }
3116 
3117 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
3118 {
3119 	gfx_v7_0_cp_gfx_enable(adev, enable);
3120 	gfx_v7_0_cp_compute_enable(adev, enable);
3121 }
3122 
3123 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
3124 {
3125 	int r;
3126 
3127 	r = gfx_v7_0_cp_gfx_load_microcode(adev);
3128 	if (r)
3129 		return r;
3130 	r = gfx_v7_0_cp_compute_load_microcode(adev);
3131 	if (r)
3132 		return r;
3133 
3134 	return 0;
3135 }
3136 
3137 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3138 					       bool enable)
3139 {
3140 	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3141 
3142 	if (enable)
3143 		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3144 				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3145 	else
3146 		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
3147 				CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
3148 	WREG32(mmCP_INT_CNTL_RING0, tmp);
3149 }
3150 
3151 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3152 {
3153 	int r;
3154 
3155 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3156 
3157 	r = gfx_v7_0_cp_load_microcode(adev);
3158 	if (r)
3159 		return r;
3160 
3161 	r = gfx_v7_0_cp_gfx_resume(adev);
3162 	if (r)
3163 		return r;
3164 	r = gfx_v7_0_cp_compute_resume(adev);
3165 	if (r)
3166 		return r;
3167 
3168 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3169 
3170 	return 0;
3171 }
3172 
3173 /**
 * gfx_v7_0_ring_emit_pipeline_sync - sync the command pipeline with the CP
 *
 * @ring: the ring to emit the commands to
 *
 * Sync the command pipeline with the PFP, i.e. wait for everything
3179  * to be completed.
3180  */
3181 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3182 {
3183 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3184 	uint32_t seq = ring->fence_drv.sync_seq;
3185 	uint64_t addr = ring->fence_drv.gpu_addr;
3186 
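	/*
	 * WAIT_REG_MEM in memory space: poll the fence address until it reads
	 * back equal to the last emitted sync_seq, using either the PFP (gfx)
	 * or the ME (compute) to do the polling.
	 */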
3187 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3188 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3189 				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3190 				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
3191 	amdgpu_ring_write(ring, addr & 0xfffffffc);
3192 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3193 	amdgpu_ring_write(ring, seq);
3194 	amdgpu_ring_write(ring, 0xffffffff);
3195 	amdgpu_ring_write(ring, 4); /* poll interval */
3196 
3197 	if (usepfp) {
3198 		/* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3199 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3200 		amdgpu_ring_write(ring, 0);
3201 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3202 		amdgpu_ring_write(ring, 0);
3203 	}
3204 }
3205 
3206 /*
3207  * vm
3208  * VMID 0 is used for the physical GPU addresses seen by the kernel.
3209  * VMIDs 1-15 are used for userspace clients and are handled
3210  * by the amdgpu vm/hsa code.
3211  */
3212 /**
3213  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
3214  *
3215  * @ring: the ring to emit the commands to
 * @vmid: the VMID whose TLB entries should be invalidated
 * @pd_addr: the new page directory base address
3216  *
3217  * Update the page table base and flush the VM TLB
3218  * using the CP (CIK).
3219  */
3220 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3221 					unsigned vmid, uint64_t pd_addr)
3222 {
3223 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3224 
3225 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3226 
3227 	/* wait for the invalidate to complete */
3228 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3229 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3230 				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
3231 				 WAIT_REG_MEM_ENGINE(0))); /* me */
3232 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
3233 	amdgpu_ring_write(ring, 0);
3234 	amdgpu_ring_write(ring, 0); /* ref */
3235 	amdgpu_ring_write(ring, 0); /* mask */
3236 	amdgpu_ring_write(ring, 0x20); /* poll interval */
3237 
3238 	/* compute doesn't have PFP */
3239 	if (usepfp) {
3240 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
3241 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3242 		amdgpu_ring_write(ring, 0x0);
3243 
3244 		/* sync CE with ME to prevent CE from fetching the CEIB before the context switch is done */
3245 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3246 		amdgpu_ring_write(ring, 0);
3247 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3248 		amdgpu_ring_write(ring, 0);
3249 	}
3250 }
3251 
3252 static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
3253 				    uint32_t reg, uint32_t val)
3254 {
3255 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3256 
3257 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3258 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
3259 				 WRITE_DATA_DST_SEL(0)));
3260 	amdgpu_ring_write(ring, reg);
3261 	amdgpu_ring_write(ring, 0);
3262 	amdgpu_ring_write(ring, val);
3263 }
3264 
3265 /*
3266  * RLC
3267  * The RLC is a multi-purpose microengine that handles a
3268  * variety of functions.
3269  */
3270 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3271 {
3272 	const u32 *src_ptr;
3273 	u32 dws;
3274 	const struct cs_section_def *cs_data;
3275 	int r;
3276 
3277 	/* allocate rlc buffers */
3278 	if (adev->flags & AMD_IS_APU) {
3279 		if (adev->asic_type == CHIP_KAVERI) {
3280 			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
3281 			adev->gfx.rlc.reg_list_size =
3282 				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
3283 		} else {
3284 			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
3285 			adev->gfx.rlc.reg_list_size =
3286 				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
3287 		}
3288 	}
3289 	adev->gfx.rlc.cs_data = ci_cs_data;
3290 	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3291 	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3292 
3293 	src_ptr = adev->gfx.rlc.reg_list;
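	/*
	 * Size of the save/restore buffer in dwords: the register list itself
	 * plus extra space for the indirect register lists; the constants are
	 * presumably carried over from the radeon CIK implementation.
	 */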
3294 	dws = adev->gfx.rlc.reg_list_size;
3295 	dws += (5 * 16) + 48 + 48 + 64;
3296 
3297 	cs_data = adev->gfx.rlc.cs_data;
3298 
3299 	if (src_ptr) {
3300 		/* init save restore block */
3301 		r = amdgpu_gfx_rlc_init_sr(adev, dws);
3302 		if (r)
3303 			return r;
3304 	}
3305 
3306 	if (cs_data) {
3307 		/* init clear state block */
3308 		r = amdgpu_gfx_rlc_init_csb(adev);
3309 		if (r)
3310 			return r;
3311 	}
3312 
3313 	if (adev->gfx.rlc.cp_table_size) {
3314 		r = amdgpu_gfx_rlc_init_cpt(adev);
3315 		if (r)
3316 			return r;
3317 	}
3318 
3319 	return 0;
3320 }
3321 
3322 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
3323 {
3324 	u32 tmp;
3325 
3326 	tmp = RREG32(mmRLC_LB_CNTL);
3327 	if (enable)
3328 		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3329 	else
3330 		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
3331 	WREG32(mmRLC_LB_CNTL, tmp);
3332 }
3333 
3334 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3335 {
3336 	u32 i, j, k;
3337 	u32 mask;
3338 
3339 	mutex_lock(&adev->grbm_idx_mutex);
3340 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3341 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3342 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3343 			for (k = 0; k < adev->usec_timeout; k++) {
3344 				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3345 					break;
3346 				udelay(1);
3347 			}
3348 		}
3349 	}
3350 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3351 	mutex_unlock(&adev->grbm_idx_mutex);
3352 
3353 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
3354 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
3355 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
3356 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
3357 	for (k = 0; k < adev->usec_timeout; k++) {
3358 		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
3359 			break;
3360 		udelay(1);
3361 	}
3362 }
3363 
3364 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
3365 {
3366 	u32 tmp;
3367 
3368 	tmp = RREG32(mmRLC_CNTL);
3369 	if (tmp != rlc)
3370 		WREG32(mmRLC_CNTL, rlc);
3371 }
3372 
3373 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3374 {
3375 	u32 data, orig;
3376 
3377 	orig = data = RREG32(mmRLC_CNTL);
3378 
3379 	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
3380 		u32 i;
3381 
3382 		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
3383 		WREG32(mmRLC_CNTL, data);
3384 
3385 		for (i = 0; i < adev->usec_timeout; i++) {
3386 			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
3387 				break;
3388 			udelay(1);
3389 		}
3390 
3391 		gfx_v7_0_wait_for_rlc_serdes(adev);
3392 	}
3393 
3394 	return orig;
3395 }
3396 
3397 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
3398 {
3399 	return true;
3400 }
3401 
3402 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
3403 {
3404 	u32 tmp, i, mask;
3405 
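	/*
	 * Request RLC safe mode through RLC_GPR_REG2: bit 0 appears to be the
	 * request/handshake bit (cleared by the RLC once the request has been
	 * taken) and bit 1 selects enter (1) vs. exit (0). The fields are not
	 * named in the gfx_7 IP headers, so these are the magic values used
	 * by the radeon CIK code.
	 */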
3406 	tmp = 0x1 | (1 << 1);
3407 	WREG32(mmRLC_GPR_REG2, tmp);
3408 
3409 	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
3410 		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
3411 	for (i = 0; i < adev->usec_timeout; i++) {
3412 		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
3413 			break;
3414 		udelay(1);
3415 	}
3416 
3417 	for (i = 0; i < adev->usec_timeout; i++) {
3418 		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
3419 			break;
3420 		udelay(1);
3421 	}
3422 }
3423 
3424 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
3425 {
3426 	u32 tmp;
3427 
3428 	tmp = 0x1 | (0 << 1);
3429 	WREG32(mmRLC_GPR_REG2, tmp);
3430 }
3431 
3432 /**
3433  * gfx_v7_0_rlc_stop - stop the RLC ME
3434  *
3435  * @adev: amdgpu_device pointer
3436  *
3437  * Halt the RLC ME (MicroEngine) (CIK).
3438  */
3439 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3440 {
3441 	WREG32(mmRLC_CNTL, 0);
3442 
3443 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3444 
3445 	gfx_v7_0_wait_for_rlc_serdes(adev);
3446 }
3447 
3448 /**
3449  * gfx_v7_0_rlc_start - start the RLC ME
3450  *
3451  * @adev: amdgpu_device pointer
3452  *
3453  * Unhalt the RLC ME (MicroEngine) (CIK).
3454  */
3455 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
3456 {
3457 	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
3458 
3459 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3460 
3461 	udelay(50);
3462 }
3463 
3464 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
3465 {
3466 	u32 tmp = RREG32(mmGRBM_SOFT_RESET);
3467 
3468 	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3469 	WREG32(mmGRBM_SOFT_RESET, tmp);
3470 	udelay(50);
3471 	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
3472 	WREG32(mmGRBM_SOFT_RESET, tmp);
3473 	udelay(50);
3474 }
3475 
3476 /**
3477  * gfx_v7_0_rlc_resume - setup the RLC hw
3478  *
3479  * @adev: amdgpu_device pointer
3480  *
3481  * Initialize the RLC registers, load the ucode,
3482  * and start the RLC (CIK).
3483  * Returns 0 for success, -EINVAL if the ucode is not available.
3484  */
3485 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3486 {
3487 	const struct rlc_firmware_header_v1_0 *hdr;
3488 	const __le32 *fw_data;
3489 	unsigned i, fw_size;
3490 	u32 tmp;
3491 
3492 	if (!adev->gfx.rlc_fw)
3493 		return -EINVAL;
3494 
3495 	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
3496 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
3497 	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
3498 	adev->gfx.rlc_feature_version = le32_to_cpu(
3499 					hdr->ucode_feature_version);
3500 
3501 	adev->gfx.rlc.funcs->stop(adev);
3502 
3503 	/* disable CG */
3504 	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
3505 	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
3506 
3507 	adev->gfx.rlc.funcs->reset(adev);
3508 
3509 	gfx_v7_0_init_pg(adev);
3510 
3511 	WREG32(mmRLC_LB_CNTR_INIT, 0);
3512 	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3513 
3514 	mutex_lock(&adev->grbm_idx_mutex);
3515 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3516 	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3517 	WREG32(mmRLC_LB_PARAMS, 0x00600408);
3518 	WREG32(mmRLC_LB_CNTL, 0x80000004);
3519 	mutex_unlock(&adev->grbm_idx_mutex);
3520 
3521 	WREG32(mmRLC_MC_CNTL, 0);
3522 	WREG32(mmRLC_UCODE_CNTL, 0);
3523 
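	/*
	 * Load the RLC GPM ucode one dword at a time through the
	 * RLC_GPM_UCODE_ADDR/DATA pair; the final ADDR write stores the
	 * firmware version that was loaded.
	 */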
3524 	fw_data = (const __le32 *)
3525 		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3526 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3527 	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
3528 	for (i = 0; i < fw_size; i++)
3529 		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3530 	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3531 
3532 	/* XXX - find out what chips support lbpw */
3533 	gfx_v7_0_enable_lbpw(adev, false);
3534 
3535 	if (adev->asic_type == CHIP_BONAIRE)
3536 		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
3537 
3538 	adev->gfx.rlc.funcs->start(adev);
3539 
3540 	return 0;
3541 }
3542 
3543 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3544 {
3545 	u32 data, orig, tmp, tmp2;
3546 
3547 	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3548 
3549 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3550 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3551 
3552 		tmp = gfx_v7_0_halt_rlc(adev);
3553 
3554 		mutex_lock(&adev->grbm_idx_mutex);
3555 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3556 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3557 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3558 		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3559 			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
3560 			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
3561 		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
3562 		mutex_unlock(&adev->grbm_idx_mutex);
3563 
3564 		gfx_v7_0_update_rlc(adev, tmp);
3565 
3566 		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3567 		if (orig != data)
3568 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3569 
3570 	} else {
3571 		gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3572 
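		/*
		 * Dummy reads, presumably to let in-flight CB clock gating
		 * transactions settle before CGCG/CGLS are turned off (the
		 * same sequence is used by the radeon CIK code).
		 */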
3573 		RREG32(mmCB_CGTT_SCLK_CTRL);
3574 		RREG32(mmCB_CGTT_SCLK_CTRL);
3575 		RREG32(mmCB_CGTT_SCLK_CTRL);
3576 		RREG32(mmCB_CGTT_SCLK_CTRL);
3577 
3578 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3579 		if (orig != data)
3580 			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
3581 
3582 		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3583 	}
3584 }
3585 
3586 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3587 {
3588 	u32 data, orig, tmp = 0;
3589 
3590 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3591 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3592 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3593 				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3594 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3595 				if (orig != data)
3596 					WREG32(mmCP_MEM_SLP_CNTL, data);
3597 			}
3598 		}
3599 
3600 		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3601 		data |= 0x00000001;
3602 		data &= 0xfffffffd;
3603 		if (orig != data)
3604 			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3605 
3606 		tmp = gfx_v7_0_halt_rlc(adev);
3607 
3608 		mutex_lock(&adev->grbm_idx_mutex);
3609 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3610 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3611 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3612 		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
3613 			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
3614 		WREG32(mmRLC_SERDES_WR_CTRL, data);
3615 		mutex_unlock(&adev->grbm_idx_mutex);
3616 
3617 		gfx_v7_0_update_rlc(adev, tmp);
3618 
3619 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3620 			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3621 			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3622 			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3623 			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3624 			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3625 			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3626 			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3627 				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3628 			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3629 			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
3630 			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
3631 			if (orig != data)
3632 				WREG32(mmCGTS_SM_CTRL_REG, data);
3633 		}
3634 	} else {
3635 		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
3636 		data |= 0x00000003;
3637 		if (orig != data)
3638 			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
3639 
3640 		data = RREG32(mmRLC_MEM_SLP_CNTL);
3641 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3642 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3643 			WREG32(mmRLC_MEM_SLP_CNTL, data);
3644 		}
3645 
3646 		data = RREG32(mmCP_MEM_SLP_CNTL);
3647 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3648 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3649 			WREG32(mmCP_MEM_SLP_CNTL, data);
3650 		}
3651 
3652 		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3653 		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3654 		if (orig != data)
3655 			WREG32(mmCGTS_SM_CTRL_REG, data);
3656 
3657 		tmp = gfx_v7_0_halt_rlc(adev);
3658 
3659 		mutex_lock(&adev->grbm_idx_mutex);
3660 		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3661 		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3662 		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3663 		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
3664 		WREG32(mmRLC_SERDES_WR_CTRL, data);
3665 		mutex_unlock(&adev->grbm_idx_mutex);
3666 
3667 		gfx_v7_0_update_rlc(adev, tmp);
3668 	}
3669 }
3670 
3671 static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
3672 			       bool enable)
3673 {
3674 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
3675 	/* order matters! */
3676 	if (enable) {
3677 		gfx_v7_0_enable_mgcg(adev, true);
3678 		gfx_v7_0_enable_cgcg(adev, true);
3679 	} else {
3680 		gfx_v7_0_enable_cgcg(adev, false);
3681 		gfx_v7_0_enable_mgcg(adev, false);
3682 	}
3683 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3684 }
3685 
3686 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3687 						bool enable)
3688 {
3689 	u32 data, orig;
3690 
3691 	orig = data = RREG32(mmRLC_PG_CNTL);
3692 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3693 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3694 	else
3695 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3696 	if (orig != data)
3697 		WREG32(mmRLC_PG_CNTL, data);
3698 }
3699 
3700 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3701 						bool enable)
3702 {
3703 	u32 data, orig;
3704 
3705 	orig = data = RREG32(mmRLC_PG_CNTL);
3706 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3707 		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3708 	else
3709 		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3710 	if (orig != data)
3711 		WREG32(mmRLC_PG_CNTL, data);
3712 }
3713 
3714 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3715 {
3716 	u32 data, orig;
3717 
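	/*
	 * Bit 15 of RLC_PG_CNTL appears to be a CP power-gating disable bit;
	 * it is not named in the gfx_7 IP headers, hence the raw 0x8000 mask.
	 */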
3718 	orig = data = RREG32(mmRLC_PG_CNTL);
3719 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3720 		data &= ~0x8000;
3721 	else
3722 		data |= 0x8000;
3723 	if (orig != data)
3724 		WREG32(mmRLC_PG_CNTL, data);
3725 }
3726 
3727 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3728 {
3729 	u32 data, orig;
3730 
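	/*
	 * Likewise, bit 13 (0x2000) of RLC_PG_CNTL appears to be the GDS
	 * power-gating disable bit.
	 */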
3731 	orig = data = RREG32(mmRLC_PG_CNTL);
3732 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3733 		data &= ~0x2000;
3734 	else
3735 		data |= 0x2000;
3736 	if (orig != data)
3737 		WREG32(mmRLC_PG_CNTL, data);
3738 }
3739 
3740 static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
3741 {
3742 	if (adev->asic_type == CHIP_KAVERI)
3743 		return 5;
3744 	else
3745 		return 4;
3746 }
3747 
3748 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3749 				     bool enable)
3750 {
3751 	u32 data, orig;
3752 
3753 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3754 		orig = data = RREG32(mmRLC_PG_CNTL);
3755 		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3756 		if (orig != data)
3757 			WREG32(mmRLC_PG_CNTL, data);
3758 
3759 		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3760 		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3761 		if (orig != data)
3762 			WREG32(mmRLC_AUTO_PG_CTRL, data);
3763 	} else {
3764 		orig = data = RREG32(mmRLC_PG_CNTL);
3765 		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3766 		if (orig != data)
3767 			WREG32(mmRLC_PG_CNTL, data);
3768 
3769 		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
3770 		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
3771 		if (orig != data)
3772 			WREG32(mmRLC_AUTO_PG_CTRL, data);
3773 
3774 		data = RREG32(mmDB_RENDER_CONTROL);
3775 	}
3776 }
3777 
3778 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3779 						 u32 bitmap)
3780 {
3781 	u32 data;
3782 
3783 	if (!bitmap)
3784 		return;
3785 
3786 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3787 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3788 
3789 	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3790 }
3791 
3792 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3793 {
3794 	u32 data, mask;
3795 
3796 	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
3797 	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
3798 
3799 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3800 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3801 
3802 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
3803 
3804 	return (~data) & mask;
3805 }
3806 
3807 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
3808 {
3809 	u32 tmp;
3810 
3811 	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3812 
3813 	tmp = RREG32(mmRLC_MAX_PG_CU);
3814 	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
3815 	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
3816 	WREG32(mmRLC_MAX_PG_CU, tmp);
3817 }
3818 
3819 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3820 					    bool enable)
3821 {
3822 	u32 data, orig;
3823 
3824 	orig = data = RREG32(mmRLC_PG_CNTL);
3825 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3826 		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3827 	else
3828 		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3829 	if (orig != data)
3830 		WREG32(mmRLC_PG_CNTL, data);
3831 }
3832 
3833 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3834 					     bool enable)
3835 {
3836 	u32 data, orig;
3837 
3838 	orig = data = RREG32(mmRLC_PG_CNTL);
3839 	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3840 		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3841 	else
3842 		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3843 	if (orig != data)
3844 		WREG32(mmRLC_PG_CNTL, data);
3845 }
3846 
3847 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
3848 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
3849 
3850 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
3851 {
3852 	u32 data, orig;
3853 	u32 i;
3854 
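	/*
	 * Write the clear-state descriptor (upper/lower address and size of
	 * the clear state buffer) into RLC GPM scratch so the RLC can locate
	 * the clear state buffer when power gating the gfx block.
	 */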
3855 	if (adev->gfx.rlc.cs_data) {
3856 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3857 		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3858 		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
3859 		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
3860 	} else {
3861 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
3862 		for (i = 0; i < 3; i++)
3863 			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
3864 	}
3865 	if (adev->gfx.rlc.reg_list) {
3866 		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
3867 		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
3868 			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
3869 	}
3870 
3871 	orig = data = RREG32(mmRLC_PG_CNTL);
3872 	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
3873 	if (orig != data)
3874 		WREG32(mmRLC_PG_CNTL, data);
3875 
3876 	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
3877 	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3878 
3879 	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3880 	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3881 	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3882 	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
3883 
3884 	data = 0x10101010;
3885 	WREG32(mmRLC_PG_DELAY, data);
3886 
3887 	data = RREG32(mmRLC_PG_DELAY_2);
3888 	data &= ~0xff;
3889 	data |= 0x3;
3890 	WREG32(mmRLC_PG_DELAY_2, data);
3891 
3892 	data = RREG32(mmRLC_AUTO_PG_CTRL);
3893 	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
3894 	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
3895 	WREG32(mmRLC_AUTO_PG_CTRL, data);
3896 
3897 }
3898 
3899 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
3900 {
3901 	gfx_v7_0_enable_gfx_cgpg(adev, enable);
3902 	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
3903 	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
3904 }
3905 
3906 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
3907 {
3908 	u32 count = 0;
3909 	const struct cs_section_def *sect = NULL;
3910 	const struct cs_extent_def *ext = NULL;
3911 
3912 	if (adev->gfx.rlc.cs_data == NULL)
3913 		return 0;
3914 
3915 	/* begin clear state */
3916 	count += 2;
3917 	/* context control state */
3918 	count += 3;
3919 
3920 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3921 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3922 			if (sect->id == SECT_CONTEXT)
3923 				count += 2 + ext->reg_count;
3924 			else
3925 				return 0;
3926 		}
3927 	}
3928 	/* pa_sc_raster_config/pa_sc_raster_config1 */
3929 	count += 4;
3930 	/* end clear state */
3931 	count += 2;
3932 	/* clear state */
3933 	count += 2;
3934 
3935 	return count;
3936 }
3937 
3938 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
3939 				    volatile u32 *buffer)
3940 {
3941 	u32 count = 0, i;
3942 	const struct cs_section_def *sect = NULL;
3943 	const struct cs_extent_def *ext = NULL;
3944 
3945 	if (adev->gfx.rlc.cs_data == NULL)
3946 		return;
3947 	if (buffer == NULL)
3948 		return;
3949 
3950 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3951 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3952 
3953 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3954 	buffer[count++] = cpu_to_le32(0x80000000);
3955 	buffer[count++] = cpu_to_le32(0x80000000);
3956 
3957 	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
3958 		for (ext = sect->section; ext->extent != NULL; ++ext) {
3959 			if (sect->id == SECT_CONTEXT) {
3960 				buffer[count++] =
3961 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
3962 				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3963 				for (i = 0; i < ext->reg_count; i++)
3964 					buffer[count++] = cpu_to_le32(ext->extent[i]);
3965 			} else {
3966 				return;
3967 			}
3968 		}
3969 	}
3970 
3971 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3972 	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
3973 	switch (adev->asic_type) {
3974 	case CHIP_BONAIRE:
3975 		buffer[count++] = cpu_to_le32(0x16000012);
3976 		buffer[count++] = cpu_to_le32(0x00000000);
3977 		break;
3978 	case CHIP_KAVERI:
3979 		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3980 		buffer[count++] = cpu_to_le32(0x00000000);
3981 		break;
3982 	case CHIP_KABINI:
3983 	case CHIP_MULLINS:
3984 		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
3985 		buffer[count++] = cpu_to_le32(0x00000000);
3986 		break;
3987 	case CHIP_HAWAII:
3988 		buffer[count++] = cpu_to_le32(0x3a00161a);
3989 		buffer[count++] = cpu_to_le32(0x0000002e);
3990 		break;
3991 	default:
3992 		buffer[count++] = cpu_to_le32(0x00000000);
3993 		buffer[count++] = cpu_to_le32(0x00000000);
3994 		break;
3995 	}
3996 
3997 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3998 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
3999 
4000 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
4001 	buffer[count++] = cpu_to_le32(0);
4002 }
4003 
4004 static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4005 {
4006 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4007 			      AMD_PG_SUPPORT_GFX_SMG |
4008 			      AMD_PG_SUPPORT_GFX_DMG |
4009 			      AMD_PG_SUPPORT_CP |
4010 			      AMD_PG_SUPPORT_GDS |
4011 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4012 		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4013 		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4014 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4015 			gfx_v7_0_init_gfx_cgpg(adev);
4016 			gfx_v7_0_enable_cp_pg(adev, true);
4017 			gfx_v7_0_enable_gds_pg(adev, true);
4018 		}
4019 		gfx_v7_0_init_ao_cu_mask(adev);
4020 		gfx_v7_0_update_gfx_pg(adev, true);
4021 	}
4022 }
4023 
4024 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4025 {
4026 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4027 			      AMD_PG_SUPPORT_GFX_SMG |
4028 			      AMD_PG_SUPPORT_GFX_DMG |
4029 			      AMD_PG_SUPPORT_CP |
4030 			      AMD_PG_SUPPORT_GDS |
4031 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4032 		gfx_v7_0_update_gfx_pg(adev, false);
4033 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4034 			gfx_v7_0_enable_cp_pg(adev, false);
4035 			gfx_v7_0_enable_gds_pg(adev, false);
4036 		}
4037 	}
4038 }
4039 
4040 /**
4041  * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
4042  *
4043  * @adev: amdgpu_device pointer
4044  *
4045  * Fetches a GPU clock counter snapshot (CIK).
4046  * Returns the 64 bit clock counter snapshot.
4047  */
4048 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4049 {
4050 	uint64_t clock;
4051 
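	/*
	 * Writing RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running GPU
	 * clock counter so the 32-bit LSB/MSB halves read back a consistent
	 * 64-bit value; gpu_clock_mutex serializes concurrent captures.
	 */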
4052 	mutex_lock(&adev->gfx.gpu_clock_mutex);
4053 	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4054 	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
4055 		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4056 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
4057 	return clock;
4058 }
4059 
4060 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4061 					  uint32_t vmid,
4062 					  uint32_t gds_base, uint32_t gds_size,
4063 					  uint32_t gws_base, uint32_t gws_size,
4064 					  uint32_t oa_base, uint32_t oa_size)
4065 {
4066 	/* GDS Base */
4067 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4068 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4069 				WRITE_DATA_DST_SEL(0)));
4070 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
4071 	amdgpu_ring_write(ring, 0);
4072 	amdgpu_ring_write(ring, gds_base);
4073 
4074 	/* GDS Size */
4075 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4076 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4077 				WRITE_DATA_DST_SEL(0)));
4078 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
4079 	amdgpu_ring_write(ring, 0);
4080 	amdgpu_ring_write(ring, gds_size);
4081 
4082 	/* GWS */
4083 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4084 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4085 				WRITE_DATA_DST_SEL(0)));
4086 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
4087 	amdgpu_ring_write(ring, 0);
4088 	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4089 
4090 	/* OA */
4091 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4092 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4093 				WRITE_DATA_DST_SEL(0)));
4094 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
4095 	amdgpu_ring_write(ring, 0);
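	/* build a contiguous mask of oa_size bits starting at oa_base */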
4096 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4097 }
4098 
4099 static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
4100 {
4101 	struct amdgpu_device *adev = ring->adev;
4102 	uint32_t value = 0;
4103 
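	/*
	 * Kick the SQ to act only on waves belonging to the hung VMID; the
	 * CMD/MODE values are the magic numbers used for soft recovery on CIK
	 * (the encodings are not spelled out in the gfx_7 headers).
	 */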
4104 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
4105 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
4106 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
4107 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
4108 	WREG32(mmSQ_CMD, value);
4109 }
4110 
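/*
 * Indirect access to per-wave state: SQ_IND_INDEX selects the SIMD, wave and
 * register index (optionally with auto-increment), and the value is read back
 * through SQ_IND_DATA.
 */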
4111 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
4112 {
4113 	WREG32(mmSQ_IND_INDEX,
4114 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4115 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4116 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
4117 		(SQ_IND_INDEX__FORCE_READ_MASK));
4118 	return RREG32(mmSQ_IND_DATA);
4119 }
4120 
4121 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
4122 			   uint32_t wave, uint32_t thread,
4123 			   uint32_t regno, uint32_t num, uint32_t *out)
4124 {
4125 	WREG32(mmSQ_IND_INDEX,
4126 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
4127 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
4128 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
4129 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
4130 		(SQ_IND_INDEX__FORCE_READ_MASK) |
4131 		(SQ_IND_INDEX__AUTO_INCR_MASK));
4132 	while (num--)
4133 		*(out++) = RREG32(mmSQ_IND_DATA);
4134 }
4135 
4136 static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
4137 {
4138 	/* type 0 wave data */
4139 	dst[(*no_fields)++] = 0;
4140 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
4141 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
4142 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
4143 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
4144 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
4145 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
4146 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
4147 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
4148 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
4149 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
4150 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
4151 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
4152 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
4153 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
4154 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
4155 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
4156 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
4157 	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
4158 }
4159 
4160 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
4161 				     uint32_t wave, uint32_t start,
4162 				     uint32_t size, uint32_t *dst)
4163 {
4164 	wave_read_regs(
4165 		adev, simd, wave, 0,
4166 		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
4167 }
4168 
4169 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
4170 				  u32 me, u32 pipe, u32 q)
4171 {
4172 	cik_srbm_select(adev, me, pipe, q, 0);
4173 }
4174 
4175 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4176 	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4177 	.select_se_sh = &gfx_v7_0_select_se_sh,
4178 	.read_wave_data = &gfx_v7_0_read_wave_data,
4179 	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
4180 	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
4181 };
4182 
4183 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4184 	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
4185 	.set_safe_mode = gfx_v7_0_set_safe_mode,
4186 	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
4187 	.init = gfx_v7_0_rlc_init,
4188 	.get_csb_size = gfx_v7_0_get_csb_size,
4189 	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
4190 	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
4191 	.resume = gfx_v7_0_rlc_resume,
4192 	.stop = gfx_v7_0_rlc_stop,
4193 	.reset = gfx_v7_0_rlc_reset,
4194 	.start = gfx_v7_0_rlc_start
4195 };
4196 
4197 static int gfx_v7_0_early_init(void *handle)
4198 {
4199 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4200 
4201 	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4202 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4203 	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4204 	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4205 	gfx_v7_0_set_ring_funcs(adev);
4206 	gfx_v7_0_set_irq_funcs(adev);
4207 	gfx_v7_0_set_gds_init(adev);
4208 
4209 	return 0;
4210 }
4211 
4212 static int gfx_v7_0_late_init(void *handle)
4213 {
4214 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4215 	int r;
4216 
4217 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4218 	if (r)
4219 		return r;
4220 
4221 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4222 	if (r)
4223 		return r;
4224 
4225 	return 0;
4226 }
4227 
4228 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
4229 {
4230 	u32 gb_addr_config;
4231 	u32 mc_shared_chmap, mc_arb_ramcfg;
4232 	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
4233 	u32 tmp;
4234 
4235 	switch (adev->asic_type) {
4236 	case CHIP_BONAIRE:
4237 		adev->gfx.config.max_shader_engines = 2;
4238 		adev->gfx.config.max_tile_pipes = 4;
4239 		adev->gfx.config.max_cu_per_sh = 7;
4240 		adev->gfx.config.max_sh_per_se = 1;
4241 		adev->gfx.config.max_backends_per_se = 2;
4242 		adev->gfx.config.max_texture_channel_caches = 4;
4243 		adev->gfx.config.max_gprs = 256;
4244 		adev->gfx.config.max_gs_threads = 32;
4245 		adev->gfx.config.max_hw_contexts = 8;
4246 
4247 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4248 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4249 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4250 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4251 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4252 		break;
4253 	case CHIP_HAWAII:
4254 		adev->gfx.config.max_shader_engines = 4;
4255 		adev->gfx.config.max_tile_pipes = 16;
4256 		adev->gfx.config.max_cu_per_sh = 11;
4257 		adev->gfx.config.max_sh_per_se = 1;
4258 		adev->gfx.config.max_backends_per_se = 4;
4259 		adev->gfx.config.max_texture_channel_caches = 16;
4260 		adev->gfx.config.max_gprs = 256;
4261 		adev->gfx.config.max_gs_threads = 32;
4262 		adev->gfx.config.max_hw_contexts = 8;
4263 
4264 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4265 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4266 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4267 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4268 		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
4269 		break;
4270 	case CHIP_KAVERI:
4271 		adev->gfx.config.max_shader_engines = 1;
4272 		adev->gfx.config.max_tile_pipes = 4;
4273 		adev->gfx.config.max_cu_per_sh = 8;
4274 		adev->gfx.config.max_backends_per_se = 2;
4275 		adev->gfx.config.max_sh_per_se = 1;
4276 		adev->gfx.config.max_texture_channel_caches = 4;
4277 		adev->gfx.config.max_gprs = 256;
4278 		adev->gfx.config.max_gs_threads = 16;
4279 		adev->gfx.config.max_hw_contexts = 8;
4280 
4281 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4282 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4283 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4284 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4285 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4286 		break;
4287 	case CHIP_KABINI:
4288 	case CHIP_MULLINS:
4289 	default:
4290 		adev->gfx.config.max_shader_engines = 1;
4291 		adev->gfx.config.max_tile_pipes = 2;
4292 		adev->gfx.config.max_cu_per_sh = 2;
4293 		adev->gfx.config.max_sh_per_se = 1;
4294 		adev->gfx.config.max_backends_per_se = 1;
4295 		adev->gfx.config.max_texture_channel_caches = 2;
4296 		adev->gfx.config.max_gprs = 256;
4297 		adev->gfx.config.max_gs_threads = 16;
4298 		adev->gfx.config.max_hw_contexts = 8;
4299 
4300 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
4301 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
4302 		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
4303 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
4304 		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
4305 		break;
4306 	}
4307 
4308 	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
4309 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
4310 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
4311 
4312 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
4313 	adev->gfx.config.mem_max_burst_length_bytes = 256;
4314 	if (adev->flags & AMD_IS_APU) {
4315 		/* Get memory bank mapping mode. */
4316 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
4317 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4318 		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4319 
4320 		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
4321 		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
4322 		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
4323 
4324 		/* Validate settings in case only one DIMM is installed. */
4325 		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
4326 			dimm00_addr_map = 0;
4327 		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
4328 			dimm01_addr_map = 0;
4329 		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
4330 			dimm10_addr_map = 0;
4331 		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
4332 			dimm11_addr_map = 0;
4333 
4334 		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
4335 	/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
4336 		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
4337 			adev->gfx.config.mem_row_size_in_kb = 2;
4338 		else
4339 			adev->gfx.config.mem_row_size_in_kb = 1;
4340 	} else {
4341 		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
4342 		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
4343 		if (adev->gfx.config.mem_row_size_in_kb > 4)
4344 			adev->gfx.config.mem_row_size_in_kb = 4;
4345 	}
4346 	/* XXX use MC settings? */
4347 	adev->gfx.config.shader_engine_tile_size = 32;
4348 	adev->gfx.config.num_gpus = 1;
4349 	adev->gfx.config.multi_gpu_tile_size = 64;
4350 
4351 	/* fix up row size */
4352 	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
4353 	switch (adev->gfx.config.mem_row_size_in_kb) {
4354 	case 1:
4355 	default:
4356 		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4357 		break;
4358 	case 2:
4359 		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4360 		break;
4361 	case 4:
4362 		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
4363 		break;
4364 	}
4365 	adev->gfx.config.gb_addr_config = gb_addr_config;
4366 }
4367 
4368 static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
4369 					int mec, int pipe, int queue)
4370 {
4371 	int r;
4372 	unsigned irq_type;
4373 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
4374 
4375 	/* mec0 is me1 */
4376 	ring->me = mec + 1;
4377 	ring->pipe = pipe;
4378 	ring->queue = queue;
4379 
4380 	ring->ring_obj = NULL;
4381 	ring->use_doorbell = true;
4382 	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
4383 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
4384 
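	/* EOP interrupt sources are laid out MEC-major, one per pipe */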
4385 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
4386 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
4387 		+ ring->pipe;
4388 
4389 	/* type-2 packets are deprecated on MEC, use type-3 instead */
4390 	r = amdgpu_ring_init(adev, ring, 1024,
4391 			&adev->gfx.eop_irq, irq_type);
4392 	if (r)
4393 		return r;
4394 
4395 
4396 	return 0;
4397 }
4398 
4399 static int gfx_v7_0_sw_init(void *handle)
4400 {
4401 	struct amdgpu_ring *ring;
4402 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4403 	int i, j, k, r, ring_id;
4404 
4405 	switch (adev->asic_type) {
4406 	case CHIP_KAVERI:
4407 		adev->gfx.mec.num_mec = 2;
4408 		break;
4409 	case CHIP_BONAIRE:
4410 	case CHIP_HAWAII:
4411 	case CHIP_KABINI:
4412 	case CHIP_MULLINS:
4413 	default:
4414 		adev->gfx.mec.num_mec = 1;
4415 		break;
4416 	}
4417 	adev->gfx.mec.num_pipe_per_mec = 4;
4418 	adev->gfx.mec.num_queue_per_pipe = 8;
4419 
4420 	/* EOP Event */
4421 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
4422 	if (r)
4423 		return r;
4424 
4425 	/* Privileged reg */
4426 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
4427 			      &adev->gfx.priv_reg_irq);
4428 	if (r)
4429 		return r;
4430 
4431 	/* Privileged inst */
4432 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
4433 			      &adev->gfx.priv_inst_irq);
4434 	if (r)
4435 		return r;
4436 
4437 	gfx_v7_0_scratch_init(adev);
4438 
4439 	r = gfx_v7_0_init_microcode(adev);
4440 	if (r) {
4441 		DRM_ERROR("Failed to load gfx firmware!\n");
4442 		return r;
4443 	}
4444 
4445 	r = adev->gfx.rlc.funcs->init(adev);
4446 	if (r) {
4447 		DRM_ERROR("Failed to init rlc BOs!\n");
4448 		return r;
4449 	}
4450 
4451 	/* allocate mec buffers */
4452 	r = gfx_v7_0_mec_init(adev);
4453 	if (r) {
4454 		DRM_ERROR("Failed to init MEC BOs!\n");
4455 		return r;
4456 	}
4457 
4458 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4459 		ring = &adev->gfx.gfx_ring[i];
4460 		ring->ring_obj = NULL;
4461 		sprintf(ring->name, "gfx");
4462 		r = amdgpu_ring_init(adev, ring, 1024,
4463 				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
4464 		if (r)
4465 			return r;
4466 	}
4467 
4468 	/* set up the compute queues - allocate horizontally across pipes */
4469 	ring_id = 0;
4470 	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4471 		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4472 			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4473 				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
4474 					continue;
4475 
4476 				r = gfx_v7_0_compute_ring_init(adev,
4477 								ring_id,
4478 								i, k, j);
4479 				if (r)
4480 					return r;
4481 
4482 				ring_id++;
4483 			}
4484 		}
4485 	}
4486 
4487 	adev->gfx.ce_ram_size = 0x8000;
4488 
4489 	gfx_v7_0_gpu_early_init(adev);
4490 
4491 	return r;
4492 }
4493 
4494 static int gfx_v7_0_sw_fini(void *handle)
4495 {
4496 	int i;
4497 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4498 
4499 	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
4500 	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
4501 	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
4502 
4503 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4504 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
4505 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
4506 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4507 
4508 	gfx_v7_0_cp_compute_fini(adev);
4509 	amdgpu_gfx_rlc_fini(adev);
4510 	gfx_v7_0_mec_fini(adev);
4511 	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
4512 				&adev->gfx.rlc.clear_state_gpu_addr,
4513 				(void **)&adev->gfx.rlc.cs_ptr);
4514 	if (adev->gfx.rlc.cp_table_size) {
4515 		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
4516 				&adev->gfx.rlc.cp_table_gpu_addr,
4517 				(void **)&adev->gfx.rlc.cp_table_ptr);
4518 	}
4519 	gfx_v7_0_free_microcode(adev);
4520 
4521 	return 0;
4522 }
4523 
4524 static int gfx_v7_0_hw_init(void *handle)
4525 {
4526 	int r;
4527 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4528 
4529 	gfx_v7_0_constants_init(adev);
4530 
4531 	/* init rlc */
4532 	r = adev->gfx.rlc.funcs->resume(adev);
4533 	if (r)
4534 		return r;
4535 
4536 	r = gfx_v7_0_cp_resume(adev);
4537 	if (r)
4538 		return r;
4539 
4540 	return r;
4541 }
4542 
4543 static int gfx_v7_0_hw_fini(void *handle)
4544 {
4545 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4546 
4547 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4548 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4549 	gfx_v7_0_cp_enable(adev, false);
4550 	adev->gfx.rlc.funcs->stop(adev);
4551 	gfx_v7_0_fini_pg(adev);
4552 
4553 	return 0;
4554 }
4555 
4556 static int gfx_v7_0_suspend(void *handle)
4557 {
4558 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4559 
4560 	return gfx_v7_0_hw_fini(adev);
4561 }
4562 
4563 static int gfx_v7_0_resume(void *handle)
4564 {
4565 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4566 
4567 	return gfx_v7_0_hw_init(adev);
4568 }
4569 
4570 static bool gfx_v7_0_is_idle(void *handle)
4571 {
4572 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4573 
4574 	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
4575 		return false;
4576 	else
4577 		return true;
4578 }
4579 
4580 static int gfx_v7_0_wait_for_idle(void *handle)
4581 {
4582 	unsigned i;
4583 	u32 tmp;
4584 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4585 
4586 	for (i = 0; i < adev->usec_timeout; i++) {
4587 		/* read GRBM_STATUS */
4588 		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
4589 
4590 		if (!tmp)
4591 			return 0;
4592 		udelay(1);
4593 	}
4594 	return -ETIMEDOUT;
4595 }
4596 
4597 static int gfx_v7_0_soft_reset(void *handle)
4598 {
4599 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4600 	u32 tmp;
4601 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4602 
4603 	/* GRBM_STATUS */
4604 	tmp = RREG32(mmGRBM_STATUS);
4605 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4606 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4607 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4608 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4609 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4610 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
4611 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
4612 			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;
4613 
4614 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4615 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
4616 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4617 	}
4618 
4619 	/* GRBM_STATUS2 */
4620 	tmp = RREG32(mmGRBM_STATUS2);
4621 	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
4622 		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
4623 
4624 	/* SRBM_STATUS */
4625 	tmp = RREG32(mmSRBM_STATUS);
4626 	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
4627 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
4628 
4629 	if (grbm_soft_reset || srbm_soft_reset) {
4630 		/* disable CG/PG */
4631 		gfx_v7_0_fini_pg(adev);
4632 		gfx_v7_0_update_cg(adev, false);
4633 
4634 		/* stop the rlc */
4635 		adev->gfx.rlc.funcs->stop(adev);
4636 
4637 		/* Disable GFX parsing/prefetching */
4638 		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
4639 
4640 		/* Disable MEC parsing/prefetching */
4641 		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
4642 
4643 		if (grbm_soft_reset) {
4644 			tmp = RREG32(mmGRBM_SOFT_RESET);
4645 			tmp |= grbm_soft_reset;
4646 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4647 			WREG32(mmGRBM_SOFT_RESET, tmp);
4648 			tmp = RREG32(mmGRBM_SOFT_RESET);
4649 
4650 			udelay(50);
4651 
4652 			tmp &= ~grbm_soft_reset;
4653 			WREG32(mmGRBM_SOFT_RESET, tmp);
4654 			tmp = RREG32(mmGRBM_SOFT_RESET);
4655 		}
4656 
4657 		if (srbm_soft_reset) {
4658 			tmp = RREG32(mmSRBM_SOFT_RESET);
4659 			tmp |= srbm_soft_reset;
4660 			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4661 			WREG32(mmSRBM_SOFT_RESET, tmp);
4662 			tmp = RREG32(mmSRBM_SOFT_RESET);
4663 
4664 			udelay(50);
4665 
4666 			tmp &= ~srbm_soft_reset;
4667 			WREG32(mmSRBM_SOFT_RESET, tmp);
4668 			tmp = RREG32(mmSRBM_SOFT_RESET);
4669 		}
4670 		/* Wait a little for things to settle down */
4671 		udelay(50);
4672 	}
4673 	return 0;
4674 }
4675 
4676 static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4677 						 enum amdgpu_interrupt_state state)
4678 {
4679 	u32 cp_int_cntl;
4680 
4681 	switch (state) {
4682 	case AMDGPU_IRQ_STATE_DISABLE:
4683 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4684 		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4685 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4686 		break;
4687 	case AMDGPU_IRQ_STATE_ENABLE:
4688 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4689 		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4690 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4691 		break;
4692 	default:
4693 		break;
4694 	}
4695 }
4696 
4697 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4698 						     int me, int pipe,
4699 						     enum amdgpu_interrupt_state state)
4700 {
4701 	u32 mec_int_cntl, mec_int_cntl_reg;
4702 
4703 	/*
4704 	 * amdgpu controls only the first MEC. That's why this function only
4705 	 * handles the setting of interrupts for this specific MEC. All other
4706 	 * pipes' interrupts are set by amdkfd.
4707 	 */
4708 
4709 	if (me == 1) {
4710 		switch (pipe) {
4711 		case 0:
4712 			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
4713 			break;
4714 		case 1:
4715 			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
4716 			break;
4717 		case 2:
4718 			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
4719 			break;
4720 		case 3:
4721 			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
4722 			break;
4723 		default:
4724 			DRM_DEBUG("invalid pipe %d\n", pipe);
4725 			return;
4726 		}
4727 	} else {
4728 		DRM_DEBUG("invalid me %d\n", me);
4729 		return;
4730 	}
4731 
4732 	switch (state) {
4733 	case AMDGPU_IRQ_STATE_DISABLE:
4734 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4735 		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4736 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4737 		break;
4738 	case AMDGPU_IRQ_STATE_ENABLE:
4739 		mec_int_cntl = RREG32(mec_int_cntl_reg);
4740 		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
4741 		WREG32(mec_int_cntl_reg, mec_int_cntl);
4742 		break;
4743 	default:
4744 		break;
4745 	}
4746 }
4747 
4748 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4749 					     struct amdgpu_irq_src *src,
4750 					     unsigned type,
4751 					     enum amdgpu_interrupt_state state)
4752 {
4753 	u32 cp_int_cntl;
4754 
4755 	switch (state) {
4756 	case AMDGPU_IRQ_STATE_DISABLE:
4757 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4758 		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4759 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4760 		break;
4761 	case AMDGPU_IRQ_STATE_ENABLE:
4762 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4763 		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
4764 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4765 		break;
4766 	default:
4767 		break;
4768 	}
4769 
4770 	return 0;
4771 }
4772 
4773 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4774 					      struct amdgpu_irq_src *src,
4775 					      unsigned type,
4776 					      enum amdgpu_interrupt_state state)
4777 {
4778 	u32 cp_int_cntl;
4779 
4780 	switch (state) {
4781 	case AMDGPU_IRQ_STATE_DISABLE:
4782 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4783 		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4784 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4785 		break;
4786 	case AMDGPU_IRQ_STATE_ENABLE:
4787 		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4788 		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
4789 		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4790 		break;
4791 	default:
4792 		break;
4793 	}
4794 
4795 	return 0;
4796 }
4797 
4798 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4799 					    struct amdgpu_irq_src *src,
4800 					    unsigned type,
4801 					    enum amdgpu_interrupt_state state)
4802 {
4803 	switch (type) {
4804 	case AMDGPU_CP_IRQ_GFX_EOP:
4805 		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
4806 		break;
4807 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4808 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4809 		break;
4810 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4811 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4812 		break;
4813 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4814 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4815 		break;
4816 	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4817 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4818 		break;
4819 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4820 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4821 		break;
4822 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4823 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4824 		break;
4825 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4826 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4827 		break;
4828 	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4829 		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4830 		break;
4831 	default:
4832 		break;
4833 	}
4834 	return 0;
4835 }
4836 
4837 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4838 			    struct amdgpu_irq_src *source,
4839 			    struct amdgpu_iv_entry *entry)
4840 {
4841 	u8 me_id, pipe_id;
4842 	struct amdgpu_ring *ring;
4843 	int i;
4844 
4845 	DRM_DEBUG("IH: CP EOP\n");
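	/* entry->ring_id encodes the ME in bits [3:2] and the pipe in bits [1:0] */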
4846 	me_id = (entry->ring_id & 0x0c) >> 2;
4847 	pipe_id = (entry->ring_id & 0x03) >> 0;
4848 	switch (me_id) {
4849 	case 0:
4850 		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4851 		break;
4852 	case 1:
4853 	case 2:
4854 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4855 			ring = &adev->gfx.compute_ring[i];
4856 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4857 				amdgpu_fence_process(ring);
4858 		}
4859 		break;
4860 	}
4861 	return 0;
4862 }
4863 
4864 static void gfx_v7_0_fault(struct amdgpu_device *adev,
4865 			   struct amdgpu_iv_entry *entry)
4866 {
4867 	struct amdgpu_ring *ring;
4868 	u8 me_id, pipe_id;
4869 	int i;
4870 
4871 	me_id = (entry->ring_id & 0x0c) >> 2;
4872 	pipe_id = (entry->ring_id & 0x03) >> 0;
4873 	switch (me_id) {
4874 	case 0:
4875 		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
4876 		break;
4877 	case 1:
4878 	case 2:
4879 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4880 			ring = &adev->gfx.compute_ring[i];
4881 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
4882 				drm_sched_fault(&ring->sched);
4883 		}
4884 		break;
4885 	}
4886 }
4887 
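/**
 * gfx_v7_0_priv_reg_irq - privileged register fault handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: IV ring entry for this interrupt
 *
 * Logs the illegal register access and forwards the entry to
 * gfx_v7_0_fault().  Returns 0.
 */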
4888 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
4889 				 struct amdgpu_irq_src *source,
4890 				 struct amdgpu_iv_entry *entry)
4891 {
4892 	DRM_ERROR("Illegal register access in command stream\n");
4893 	gfx_v7_0_fault(adev, entry);
4894 	return 0;
4895 }
4896 
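/**
 * gfx_v7_0_priv_inst_irq - privileged instruction fault handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: IV ring entry for this interrupt
 *
 * Logs the illegal instruction and forwards the entry to
 * gfx_v7_0_fault().  Returns 0.
 */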
4897 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
4898 				  struct amdgpu_irq_src *source,
4899 				  struct amdgpu_iv_entry *entry)
4900 {
4901 	DRM_ERROR("Illegal instruction in command stream\n");
4902 	/* XXX soft reset the gfx block only */
4903 	gfx_v7_0_fault(adev, entry);
4904 	return 0;
4905 }
4906 
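/**
 * gfx_v7_0_set_clockgating_state - set the gfx clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: AMD_CG_STATE_GATE or AMD_CG_STATE_UNGATE
 *
 * Enables MGCG before CGCG when gating and tears them down in the
 * reverse order when ungating; the GUI idle interrupt is held off while
 * the registers are reprogrammed.  Returns 0.
 */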
4907 static int gfx_v7_0_set_clockgating_state(void *handle,
4908 					  enum amd_clockgating_state state)
4909 {
4910 	bool gate = false;
4911 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4912 
4913 	if (state == AMD_CG_STATE_GATE)
4914 		gate = true;
4915 
4916 	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
4917 	/* order matters! */
4918 	if (gate) {
4919 		gfx_v7_0_enable_mgcg(adev, true);
4920 		gfx_v7_0_enable_cgcg(adev, true);
4921 	} else {
4922 		gfx_v7_0_enable_cgcg(adev, false);
4923 		gfx_v7_0_enable_mgcg(adev, false);
4924 	}
4925 	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4926 
4927 	return 0;
4928 }
4929 
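/**
 * gfx_v7_0_set_powergating_state - set the gfx powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: AMD_PG_STATE_GATE or AMD_PG_STATE_UNGATE
 *
 * Updates gfx powergating when any of the GFX/CP/GDS/RLC PG features are
 * supported, and additionally toggles CP and GDS powergating when full
 * GFX PG is supported.  Returns 0.
 */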
4930 static int gfx_v7_0_set_powergating_state(void *handle,
4931 					  enum amd_powergating_state state)
4932 {
4933 	bool gate = false;
4934 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4935 
4936 	if (state == AMD_PG_STATE_GATE)
4937 		gate = true;
4938 
4939 	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4940 			      AMD_PG_SUPPORT_GFX_SMG |
4941 			      AMD_PG_SUPPORT_GFX_DMG |
4942 			      AMD_PG_SUPPORT_CP |
4943 			      AMD_PG_SUPPORT_GDS |
4944 			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
4945 		gfx_v7_0_update_gfx_pg(adev, gate);
4946 		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4947 			gfx_v7_0_enable_cp_pg(adev, gate);
4948 			gfx_v7_0_enable_gds_pg(adev, gate);
4949 		}
4950 	}
4951 
4952 	return 0;
4953 }
4954 
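/* IP level callbacks shared by all gfx v7.x variants */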
4955 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
4956 	.name = "gfx_v7_0",
4957 	.early_init = gfx_v7_0_early_init,
4958 	.late_init = gfx_v7_0_late_init,
4959 	.sw_init = gfx_v7_0_sw_init,
4960 	.sw_fini = gfx_v7_0_sw_fini,
4961 	.hw_init = gfx_v7_0_hw_init,
4962 	.hw_fini = gfx_v7_0_hw_fini,
4963 	.suspend = gfx_v7_0_suspend,
4964 	.resume = gfx_v7_0_resume,
4965 	.is_idle = gfx_v7_0_is_idle,
4966 	.wait_for_idle = gfx_v7_0_wait_for_idle,
4967 	.soft_reset = gfx_v7_0_soft_reset,
4968 	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
4969 	.set_powergating_state = gfx_v7_0_set_powergating_state,
4970 };
4971 
4972 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
4973 	.type = AMDGPU_RING_TYPE_GFX,
4974 	.align_mask = 0xff,
4975 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4976 	.support_64bit_ptrs = false,
4977 	.get_rptr = gfx_v7_0_ring_get_rptr,
4978 	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
4979 	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
4980 	.emit_frame_size =
4981 		20 + /* gfx_v7_0_ring_emit_gds_switch */
4982 		7 + /* gfx_v7_0_ring_emit_hdp_flush */
4983 		5 + /* hdp invalidate */
4984 		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
4985 		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
4986 		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
4987 		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush */
4988 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
4989 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
4990 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
4991 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
4992 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
4993 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
4994 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
4995 	.test_ring = gfx_v7_0_ring_test_ring,
4996 	.test_ib = gfx_v7_0_ring_test_ib,
4997 	.insert_nop = amdgpu_ring_insert_nop,
4998 	.pad_ib = amdgpu_ring_generic_pad_ib,
4999 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
5000 	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5001 	.soft_recovery = gfx_v7_0_ring_soft_recovery,
5002 };
5003 
5004 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5005 	.type = AMDGPU_RING_TYPE_COMPUTE,
5006 	.align_mask = 0xff,
5007 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
5008 	.support_64bit_ptrs = false,
5009 	.get_rptr = gfx_v7_0_ring_get_rptr,
5010 	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
5011 	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
5012 	.emit_frame_size =
5013 		20 + /* gfx_v7_0_ring_emit_gds_switch */
5014 		7 + /* gfx_v7_0_ring_emit_hdp_flush */
5015 		5 + /* hdp invalidate */
5016 		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
5017 		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
5018 		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
5019 	.emit_ib_size =	7, /* gfx_v7_0_ring_emit_ib_compute */
5020 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
5021 	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
5022 	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
5023 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
5024 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
5025 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
5026 	.test_ring = gfx_v7_0_ring_test_ring,
5027 	.test_ib = gfx_v7_0_ring_test_ib,
5028 	.insert_nop = amdgpu_ring_insert_nop,
5029 	.pad_ib = amdgpu_ring_generic_pad_ib,
5030 	.emit_wreg = gfx_v7_0_ring_emit_wreg,
5031 };
5032 
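/**
 * gfx_v7_0_set_ring_funcs - wire up the gfx and compute ring callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Points every gfx ring at gfx_v7_0_ring_funcs_gfx and every compute
 * ring at gfx_v7_0_ring_funcs_compute.
 */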
5033 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
5034 {
5035 	int i;
5036 
5037 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
5038 		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
5039 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
5040 		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
5041 }
5042 
5043 static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
5044 	.set = gfx_v7_0_set_eop_interrupt_state,
5045 	.process = gfx_v7_0_eop_irq,
5046 };
5047 
5048 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
5049 	.set = gfx_v7_0_set_priv_reg_fault_state,
5050 	.process = gfx_v7_0_priv_reg_irq,
5051 };
5052 
5053 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
5054 	.set = gfx_v7_0_set_priv_inst_fault_state,
5055 	.process = gfx_v7_0_priv_inst_irq,
5056 };
5057 
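/**
 * gfx_v7_0_set_irq_funcs - register the gfx interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the EOP, privileged register and privileged instruction
 * interrupt sources to their set/process callbacks.
 */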
5058 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
5059 {
5060 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
5061 	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;
5062 
5063 	adev->gfx.priv_reg_irq.num_types = 1;
5064 	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;
5065 
5066 	adev->gfx.priv_inst_irq.num_types = 1;
5067 	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
5068 }
5069 
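/**
 * gfx_v7_0_set_gds_init - initialize the GDS configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the GDS memory size from the hardware and picks the default gfx
 * and compute (CS) partition sizes for GDS memory, GWS and OA depending
 * on whether the part exposes a 64KB GDS.
 */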
5070 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
5071 {
5072 	/* init asic gds info */
5073 	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
5074 	adev->gds.gws.total_size = 64;
5075 	adev->gds.oa.total_size = 16;
5076 	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
5077 
5078 	if (adev->gds.mem.total_size == 64 * 1024) {
5079 		adev->gds.mem.gfx_partition_size = 4096;
5080 		adev->gds.mem.cs_partition_size = 4096;
5081 
5082 		adev->gds.gws.gfx_partition_size = 4;
5083 		adev->gds.gws.cs_partition_size = 4;
5084 
5085 		adev->gds.oa.gfx_partition_size = 4;
5086 		adev->gds.oa.cs_partition_size = 1;
5087 	} else {
5088 		adev->gds.mem.gfx_partition_size = 1024;
5089 		adev->gds.mem.cs_partition_size = 1024;
5090 
5091 		adev->gds.gws.gfx_partition_size = 16;
5092 		adev->gds.gws.cs_partition_size = 16;
5093 
5094 		adev->gds.oa.gfx_partition_size = 4;
5095 		adev->gds.oa.cs_partition_size = 4;
5096 	}
5097 }
5098 
5099 
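/**
 * gfx_v7_0_get_cu_info - fill in the CU info for the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Walks every shader engine and shader array, records the active-CU
 * bitmap per SH (honouring any user-disabled CUs), builds the always-on
 * CU mask and fills in the static per-CU limits (SIMDs, waves, scratch
 * slots, wavefront and LDS sizes) used by the rest of the driver.
 */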
5100 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5101 {
5102 	int i, j, k, counter, active_cu_number = 0;
5103 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5104 	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5105 	unsigned disable_masks[4 * 2];
5106 	u32 ao_cu_num;
5107 
5108 	if (adev->flags & AMD_IS_APU)
5109 		ao_cu_num = 2;
5110 	else
5111 		ao_cu_num = adev->gfx.config.max_cu_per_sh;
5112 
5113 	memset(cu_info, 0, sizeof(*cu_info));
5114 
5115 	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5116 
5117 	mutex_lock(&adev->grbm_idx_mutex);
5118 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5119 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5120 			mask = 1;
5121 			ao_bitmap = 0;
5122 			counter = 0;
5123 			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5124 			if (i < 4 && j < 2)
5125 				gfx_v7_0_set_user_cu_inactive_bitmap(
5126 					adev, disable_masks[i * 2 + j]);
5127 			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5128 			cu_info->bitmap[i][j] = bitmap;
5129 
5130 			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
5131 				if (bitmap & mask) {
5132 					if (counter < ao_cu_num)
5133 						ao_bitmap |= mask;
5134 					counter++;
5135 				}
5136 				mask <<= 1;
5137 			}
5138 			active_cu_number += counter;
5139 			if (i < 2 && j < 2)
5140 				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5141 			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
5142 		}
5143 	}
5144 	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5145 	mutex_unlock(&adev->grbm_idx_mutex);
5146 
5147 	cu_info->number = active_cu_number;
5148 	cu_info->ao_cu_mask = ao_cu_mask;
5149 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
5150 	cu_info->max_waves_per_simd = 10;
5151 	cu_info->max_scratch_slots_per_cu = 32;
5152 	cu_info->wave_front_size = 64;
5153 	cu_info->lds_size = 64;
5154 }
5155 
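/*
 * IP block version structures for the gfx v7 variants (7.0 through 7.3);
 * they all share the same gfx_v7_0_ip_funcs implementation.
 */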
5156 const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
5157 {
5158 	.type = AMD_IP_BLOCK_TYPE_GFX,
5159 	.major = 7,
5160 	.minor = 0,
5161 	.rev = 0,
5162 	.funcs = &gfx_v7_0_ip_funcs,
5163 };
5164 
5165 const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
5166 {
5167 	.type = AMD_IP_BLOCK_TYPE_GFX,
5168 	.major = 7,
5169 	.minor = 1,
5170 	.rev = 0,
5171 	.funcs = &gfx_v7_0_ip_funcs,
5172 };
5173 
5174 const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
5175 {
5176 	.type = AMD_IP_BLOCK_TYPE_GFX,
5177 	.major = 7,
5178 	.minor = 2,
5179 	.rev = 0,
5180 	.funcs = &gfx_v7_0_ip_funcs,
5181 };
5182 
5183 const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
5184 {
5185 	.type = AMD_IP_BLOCK_TYPE_GFX,
5186 	.major = 7,
5187 	.minor = 3,
5188 	.rev = 0,
5189 	.funcs = &gfx_v7_0_ip_funcs,
5190 };
5191