/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* To compile this assembly code:
 *
 * Navi1x:
 *   cpp -DASIC_FAMILY=CHIP_NAVI10 cwsr_trap_handler_gfx10.asm -P -o nv1x.sp3
 *   sp3 nv1x.sp3 -hex nv1x.hex
 *
 * gfx10:
 *   cpp -DASIC_FAMILY=CHIP_SIENNA_CICHLID cwsr_trap_handler_gfx10.asm -P -o gfx10.sp3
 *   sp3 gfx10.sp3 -hex gfx10.hex
 *
 * gfx11:
 *   cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
 *   sp3 gfx11.sp3 -hex gfx11.hex
 */

#define CHIP_NAVI10 26
#define CHIP_SIENNA_CICHLID 30
#define CHIP_PLUM_BONITO 36

#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
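
// Illustrative evaluation of the feature flags above for the three supported
// ASIC_FAMILY values (pure arithmetic on the CHIP_* constants):
//   family                    NO_SQC_STORE HAVE_XNACK HAVE_SENDMSG_RTN HAVE_BUFFER_LDS_LOAD SW_SA_TRAP
//   CHIP_NAVI10 (26)               0           1             0                 1                0
//   CHIP_SIENNA_CICHLID (30)       1           0             0                 1                0
//   CHIP_PLUM_BONITO (36)          1           0             1                 0                1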

var SINGLE_STEP_MISSED_WORKAROUND		= 1	//workaround for lost MODE.DEBUG_EN exception when SAVECTX raised

var SQ_WAVE_STATUS_SPI_PRIO_MASK		= 0x00000006
var SQ_WAVE_STATUS_HALT_MASK			= 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK			= 0x20000
var SQ_WAVE_STATUS_TRAP_EN_SHIFT		= 6

var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT		= 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE		= 9
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE		= 8
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT	= 24
var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE	= 4
var SQ_WAVE_IB_STS2_WAVE64_SHIFT		= 11
var SQ_WAVE_IB_STS2_WAVE64_SIZE			= 1

#if ASIC_FAMILY < CHIP_PLUM_BONITO
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 8
#else
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT		= 12
#endif

var SQ_WAVE_TRAPSTS_SAVECTX_MASK		= 0x400
var SQ_WAVE_TRAPSTS_EXCP_MASK			= 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT		= 10
var SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK		= 0x80
var SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT		= 7
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK		= 0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT		= 8
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK		= 0x3FF
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT		= 0x0
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE		= 10
var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK		= 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT		= 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE		= 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK		= 0x800
var SQ_WAVE_TRAPSTS_EXCP_HI_MASK		= 0x7000

var SQ_WAVE_MODE_EXCP_EN_SHIFT			= 12
var SQ_WAVE_MODE_EXCP_EN_ADDR_WATCH_SHIFT	= 19

var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT		= 15
var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT		= 25
var SQ_WAVE_IB_STS_REPLAY_W64H_MASK		= 0x02000000
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK	= 0x003F8000

var SQ_WAVE_MODE_DEBUG_EN_MASK			= 0x800

// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT		= 31
var TTMP11_SAVE_REPLAY_W64H_MASK		= 0x80000000
var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT		= 24
var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK		= 0x7F000000
var TTMP11_DEBUG_TRAP_ENABLED_SHIFT		= 23
var TTMP11_DEBUG_TRAP_ENABLED_MASK		= 0x800000
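// Resulting ttmp11 bit assignment used by this handler (derived from the
// masks/shifts above):
//   ttmp11[31]    = saved IB_STS.REPLAY_W64H
//   ttmp11[30:24] = saved IB_STS.RCNT / IB_STS.FIRST_REPLAY
//   ttmp11[23]    = debug-trap-enabled flag loaded from the TMA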

// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
var S_SAVE_BUF_RSRC_WORD1_STRIDE		= 0x00040000
var S_SAVE_BUF_RSRC_WORD3_MISC			= 0x10807FAC
var S_SAVE_PC_HI_TRAP_ID_MASK			= 0x00FF0000
var S_SAVE_PC_HI_HT_MASK			= 0x01000000
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT		= 26

var S_SAVE_PC_HI_FIRST_WAVE_MASK		= 0x80000000
var S_SAVE_PC_HI_FIRST_WAVE_SHIFT		= 31

var s_sgpr_save_num				= 108

var s_save_spi_init_lo				= exec_lo
var s_save_spi_init_hi				= exec_hi
var s_save_pc_lo				= ttmp0
var s_save_pc_hi				= ttmp1
var s_save_exec_lo				= ttmp2
var s_save_exec_hi				= ttmp3
var s_save_status				= ttmp12
var s_save_trapsts				= ttmp15
var s_save_xnack_mask				= s_save_trapsts
var s_wave_size					= ttmp7
var s_save_buf_rsrc0				= ttmp8
var s_save_buf_rsrc1				= ttmp9
var s_save_buf_rsrc2				= ttmp10
var s_save_buf_rsrc3				= ttmp11
var s_save_mem_offset				= ttmp4
var s_save_alloc_size				= s_save_trapsts
var s_save_tmp					= ttmp14
var s_save_m0					= ttmp5
var s_save_ttmps_lo				= s_save_tmp
var s_save_ttmps_hi				= s_save_trapsts

var S_RESTORE_BUF_RSRC_WORD1_STRIDE		= S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC		= S_SAVE_BUF_RSRC_WORD3_MISC

var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK		= 0x04000000
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT		= 26
var S_WAVE_SIZE					= 25

var s_restore_spi_init_lo			= exec_lo
var s_restore_spi_init_hi			= exec_hi
var s_restore_mem_offset			= ttmp12
var s_restore_alloc_size			= ttmp3
var s_restore_tmp				= ttmp2
var s_restore_mem_offset_save			= s_restore_tmp
var s_restore_m0				= s_restore_alloc_size
var s_restore_mode				= ttmp7
var s_restore_flat_scratch			= s_restore_tmp
var s_restore_pc_lo				= ttmp0
var s_restore_pc_hi				= ttmp1
var s_restore_exec_lo				= ttmp4
var s_restore_exec_hi				= ttmp5
var s_restore_status				= ttmp14
var s_restore_trapsts				= ttmp15
var s_restore_xnack_mask			= ttmp13
var s_restore_buf_rsrc0				= ttmp8
var s_restore_buf_rsrc1				= ttmp9
var s_restore_buf_rsrc2				= ttmp10
var s_restore_buf_rsrc3				= ttmp11
var s_restore_size				= ttmp6
var s_restore_ttmps_lo				= s_restore_tmp
var s_restore_ttmps_hi				= s_restore_alloc_size
shader main
	asic(DEFAULT)
	type(CS)
	wave_size(32)

	s_branch	L_SKIP_RESTORE						//NOT restore. might be a regular trap or save

L_JUMP_TO_RESTORE:
	s_branch	L_RESTORE

L_SKIP_RESTORE:
	s_getreg_b32	s_save_status, hwreg(HW_REG_STATUS)			//save STATUS since we will change SCC

	// Clear SPI_PRIO: do not save with elevated priority.
	// Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
	s_andn2_b32	s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_ECC_ERR_MASK

	s_getreg_b32	s_save_trapsts, hwreg(HW_REG_TRAPSTS)

#if SW_SA_TRAP
	// If ttmp1[30] is set then issue s_barrier to unblock dependent waves.
	s_bitcmp1_b32	s_save_pc_hi, 30
	s_cbranch_scc0	L_TRAP_NO_BARRIER
	s_barrier

L_TRAP_NO_BARRIER:
	// If ttmp1[31] is set then trap may occur early.
	// Spin wait until SAVECTX exception is raised.
	s_bitcmp1_b32	s_save_pc_hi, 31
	s_cbranch_scc1  L_CHECK_SAVE
#endif

	s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
	s_cbranch_scc0	L_NOT_HALTED

L_HALTED:
	// Host trap may occur while wave is halted.
	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
	s_cbranch_scc1	L_FETCH_2ND_TRAP

L_CHECK_SAVE:
	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
	s_cbranch_scc1	L_SAVE

	// Wave is halted but neither host trap nor SAVECTX is raised.
	// Caused by instruction fetch memory violation.
	// Spin wait until context saved to prevent interrupt storm.
	s_sleep		0x10
	s_getreg_b32	s_save_trapsts, hwreg(HW_REG_TRAPSTS)
	s_branch	L_CHECK_SAVE

L_NOT_HALTED:
	// Let second-level handle non-SAVECTX exception or trap.
	// Any concurrent SAVECTX will be handled upon re-entry once halted.

	// Check non-maskable exceptions. memory_violation, illegal_instruction
	// and xnack_error exceptions always cause the wave to enter the trap
	// handler.
	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK|SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
	s_cbranch_scc1	L_FETCH_2ND_TRAP

	// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
	// Maskable exceptions only cause the wave to enter the trap handler if
	// their respective bit in mode.excp_en is set.
	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
	s_cbranch_scc0	L_CHECK_TRAP_ID

	s_and_b32	ttmp3, s_save_trapsts, SQ_WAVE_TRAPSTS_ADDR_WATCH_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
	s_cbranch_scc0	L_NOT_ADDR_WATCH
	s_bitset1_b32	ttmp2, SQ_WAVE_TRAPSTS_ADDR_WATCH_SHIFT // Check all addr_watch[123] exceptions against excp_en.addr_watch

L_NOT_ADDR_WATCH:
	s_getreg_b32	ttmp3, hwreg(HW_REG_MODE)
	s_lshl_b32	ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
	s_and_b32	ttmp2, ttmp2, ttmp3
	s_cbranch_scc1	L_FETCH_2ND_TRAP

L_CHECK_TRAP_ID:
	// Check trap_id != 0
	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
	s_cbranch_scc1	L_FETCH_2ND_TRAP

if SINGLE_STEP_MISSED_WORKAROUND
	// Prioritize single step exception over context save.
	// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
	s_getreg_b32	ttmp2, hwreg(HW_REG_MODE)
	s_and_b32	ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
	s_cbranch_scc1	L_FETCH_2ND_TRAP
end

	s_and_b32	ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK
	s_cbranch_scc1	L_SAVE

L_FETCH_2ND_TRAP:
#if HAVE_XNACK
	save_and_clear_ib_sts(ttmp14, ttmp15)
#endif

	// Read second-level TBA/TMA from first-level TMA and jump if available.
	// ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
	// ttmp12 holds SQ_WAVE_STATUS
#if HAVE_SENDMSG_RTN
	s_sendmsg_rtn_b64       [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
	s_waitcnt       lgkmcnt(0)
#else
	s_getreg_b32	ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
	s_getreg_b32	ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
#endif
	s_lshl_b64	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8

	s_bitcmp1_b32	ttmp15, 0xF
	s_cbranch_scc0	L_NO_SIGN_EXTEND_TMA
	s_or_b32	ttmp15, ttmp15, 0xFFFF0000
L_NO_SIGN_EXTEND_TMA:
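	// Worked example of the sign extension above (illustrative value): the
	// TMA holds VA[47:8], so TMA = 0x0000FFFF_F0001234 shifts left by 8 to
	// ttmp15:ttmp14 = 0x00FFFFF0:0x00123400. ttmp15 bit 15 (= VA bit 47) is
	// set, so OR in 0xFFFF0000 to get ttmp15 = 0xFFFFFFF0, i.e. the
	// canonical sign-extended 64-bit address 0xFFFFFFF000123400.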

	s_load_dword    ttmp2, [ttmp14, ttmp15], 0x10 glc:1			// debug trap enabled flag
	s_waitcnt       lgkmcnt(0)
	s_lshl_b32      ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
	s_andn2_b32     ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
	s_or_b32        ttmp11, ttmp11, ttmp2

	s_load_dwordx2	[ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1		// second-level TBA
	s_waitcnt	lgkmcnt(0)
	s_load_dwordx2	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1		// second-level TMA
	s_waitcnt	lgkmcnt(0)

	s_and_b64	[ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
	s_cbranch_scc0	L_NO_NEXT_TRAP						// second-level trap handler has not been set
	s_setpc_b64	[ttmp2, ttmp3]						// jump to second-level trap handler

L_NO_NEXT_TRAP:
	// If not caused by trap then halt wave to prevent re-entry.
	s_and_b32	ttmp2, s_save_pc_hi, (S_SAVE_PC_HI_TRAP_ID_MASK|S_SAVE_PC_HI_HT_MASK)
	s_cbranch_scc1	L_TRAP_CASE
	s_or_b32	s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK

	// If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set.
	// Rewind the PC to prevent this from occurring.
	s_sub_u32	ttmp0, ttmp0, 0x8
	s_subb_u32	ttmp1, ttmp1, 0x0

	s_branch	L_EXIT_TRAP

L_TRAP_CASE:
	// Host trap will not cause trap re-entry.
	s_and_b32	ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
	s_cbranch_scc1	L_EXIT_TRAP

	// Advance past trap instruction to prevent re-entry.
	s_add_u32	ttmp0, ttmp0, 0x4
	s_addc_u32	ttmp1, ttmp1, 0x0

L_EXIT_TRAP:
	s_and_b32	ttmp1, ttmp1, 0xFFFF

#if HAVE_XNACK
	restore_ib_sts(ttmp14, ttmp15)
#endif

	// Restore SQ_WAVE_STATUS.
	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32
	s_setreg_b32	hwreg(HW_REG_STATUS), s_save_status

	s_rfe_b64	[ttmp0, ttmp1]

L_SAVE:
	s_and_b32	s_save_pc_hi, s_save_pc_hi, 0x0000ffff			//pc[47:32]
	s_mov_b32	s_save_tmp, 0
	s_setreg_b32	hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp	//clear saveCtx bit

#if HAVE_XNACK
	save_and_clear_ib_sts(s_save_tmp, s_save_trapsts)
#endif

	/* inform SPI of readiness and wait for SPI's go signal */
	s_mov_b32	s_save_exec_lo, exec_lo					//save EXEC and use EXEC for the go signal from SPI
	s_mov_b32	s_save_exec_hi, exec_hi
	s_mov_b64	exec, 0x0						//clear EXEC to get ready to receive

#if HAVE_SENDMSG_RTN
	s_sendmsg_rtn_b64       [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
#else
	s_sendmsg	sendmsg(MSG_SAVEWAVE)					//send SPI a message and wait for SPI's write to EXEC
#endif

#if ASIC_FAMILY < CHIP_SIENNA_CICHLID
L_SLEEP:
	// sleep 1 (64clk) is not enough for 8 waves per SIMD and causes an SQ
	// hang: the 7th/8th waves cannot win arbitration to execute instructions
	// while the other waves are stuck in this sleep loop waiting for wrexec != 0.
	s_sleep		0x2
	s_cbranch_execz	L_SLEEP
#else
	s_waitcnt	lgkmcnt(0)
#endif

	// Save first_wave flag so we can clear high bits of save address.
	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
	s_lshl_b32	s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
	s_or_b32	s_save_pc_hi, s_save_pc_hi, s_save_tmp

#if NO_SQC_STORE
	// Trap temporaries must be saved via VGPR but all VGPRs are in use.
	// There is no ttmp space to hold the resource constant for VGPR save.
	// Save v0 by itself since it requires only two SGPRs.
	s_mov_b32	s_save_ttmps_lo, exec_lo
	s_and_b32	s_save_ttmps_hi, exec_hi, 0xFFFF
	s_mov_b32	exec_lo, 0xFFFFFFFF
	s_mov_b32	exec_hi, 0xFFFFFFFF
	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] slc:1 glc:1
	v_mov_b32	v0, 0x0
	s_mov_b32	exec_lo, s_save_ttmps_lo
	s_mov_b32	exec_hi, s_save_ttmps_hi
#endif

	// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
	get_wave_size(s_save_ttmps_hi)
	get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
	get_svgpr_size_bytes(s_save_ttmps_hi)
	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
	s_and_b32	s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
	s_add_u32	s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
	s_addc_u32	s_save_ttmps_hi, s_save_ttmps_hi, 0x0
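
	// Save-area layout implied by the computation above (per wave, offsets
	// from the SPI-provided base; illustrative summary):
	//   base + 0                              : VGPRs
	//   base + vgpr_size                      : shared VGPRs (wave64 only)
	//   base + vgpr_size + svgpr_size         : SGPRs (get_sgpr_size_bytes() = 512)
	//   base + ... + sgpr_size                : HWREGs (16 dwords)
	//   base + ... + sgpr_size + 0x40         : ttmps (ttmp4 lands at +0x50, etc.)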

#if NO_SQC_STORE
	v_writelane_b32	v0, ttmp4, 0x4
	v_writelane_b32	v0, ttmp5, 0x5
	v_writelane_b32	v0, ttmp6, 0x6
	v_writelane_b32	v0, ttmp7, 0x7
	v_writelane_b32	v0, ttmp8, 0x8
	v_writelane_b32	v0, ttmp9, 0x9
	v_writelane_b32	v0, ttmp10, 0xA
	v_writelane_b32	v0, ttmp11, 0xB
	v_writelane_b32	v0, ttmp13, 0xD
	v_writelane_b32	v0, exec_lo, 0xE
	v_writelane_b32	v0, exec_hi, 0xF

	s_mov_b32	exec_lo, 0x3FFF
	s_mov_b32	exec_hi, 0x0
	global_store_dword_addtid	v0, [s_save_ttmps_lo, s_save_ttmps_hi] inst_offset:0x40 slc:1 glc:1
	v_readlane_b32	ttmp14, v0, 0xE
	v_readlane_b32	ttmp15, v0, 0xF
	s_mov_b32	exec_lo, ttmp14
	s_mov_b32	exec_hi, ttmp15
#else
	s_store_dwordx4	[ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
	s_store_dwordx4	[ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
	s_store_dword   ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
#endif

	/* setup Resource Constants */
	s_mov_b32	s_save_buf_rsrc0, s_save_spi_init_lo			//base_addr_lo
	s_and_b32	s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF	//base_addr_hi
	s_or_b32	s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
	s_mov_b32	s_save_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes), although not necessarily initialized
	s_mov_b32	s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC

	s_mov_b32	s_save_m0, m0

	/* global mem offset */
	s_mov_b32	s_save_mem_offset, 0x0
	get_wave_size(s_wave_size)

#if HAVE_XNACK
	// Save and clear vector XNACK state late to free up SGPRs.
	s_getreg_b32	s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
	s_setreg_imm32_b32	hwreg(HW_REG_SHADER_XNACK_MASK), 0x0
#endif

	/* save first 4 VGPRs, needed for SGPR save */
	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_ENABLE_SAVE_4VGPR_EXEC_HI
	s_mov_b32	exec_hi, 0x00000000
	s_branch	L_SAVE_4VGPR_WAVE32
L_ENABLE_SAVE_4VGPR_EXEC_HI:
	s_mov_b32	exec_hi, 0xFFFFFFFF
	s_branch	L_SAVE_4VGPR_WAVE64
L_SAVE_4VGPR_WAVE32:
	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR Allocated in 4-GPR granularity

#if !NO_SQC_STORE
	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
#endif
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
	s_branch	L_SAVE_HWREG

L_SAVE_4VGPR_WAVE64:
	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR Allocated in 4-GPR granularity

#if !NO_SQC_STORE
	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
#endif
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3

	/* save HW registers */

L_SAVE_HWREG:
	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
	get_svgpr_size_bytes(s_save_tmp)
	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()

	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

#if NO_SQC_STORE
	v_mov_b32	v0, 0x0							//Offset[31:0] from buffer resource
	v_mov_b32	v1, 0x0							//Offset[63:32] from buffer resource
	v_mov_b32	v2, 0x0							//Set of SGPRs for TCP store
	s_mov_b32	m0, 0x0							//Next lane of v2 to write to
#endif

	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
	write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
	s_andn2_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
	write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
	write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
	write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)

	s_getreg_b32	s_save_tmp, hwreg(HW_REG_TRAPSTS)
	write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)

	// Not used on Sienna_Cichlid but keep layout same for debugger.
	write_hwreg_to_mem(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset)

	s_getreg_b32	s_save_m0, hwreg(HW_REG_MODE)
	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)

	s_getreg_b32	s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO)
	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)

	s_getreg_b32	s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
	write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)

#if NO_SQC_STORE
	// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
	s_mov_b32       exec_lo, 0xFFFF
	s_mov_b32	exec_hi, 0x0
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1

	// Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
	s_mov_b32       exec_lo, 0xFFFFFFFF
#endif

	/* save SGPRs */
	// Save SGPRs before the LDS save so that s0-s4 can be used during the LDS save...

	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
	get_svgpr_size_bytes(s_save_tmp)
	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

#if NO_SQC_STORE
	s_mov_b32	ttmp13, 0x0						//next VGPR lane to copy SGPR into
#else
	// back up s_save_buf_rsrc0 to s_save_xnack_mask, since write_16sgpr_to_mem advances rsrc0
	s_mov_b32	s_save_xnack_mask, s_save_buf_rsrc0
	s_add_u32	s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
	s_addc_u32	s_save_buf_rsrc1, s_save_buf_rsrc1, 0
#endif

	s_mov_b32	m0, 0x0							//SGPR initial index value =0
	s_nop		0x0							//Manually inserted wait states
L_SAVE_SGPR_LOOP:
	// SGPR is allocated in 16 SGPR granularity
	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
	s_movrels_b64	s12, s12						//s12 = s[12+m0], s13 = s[13+m0]
	s_movrels_b64	s14, s14						//s14 = s[14+m0], s15 = s[15+m0]

	write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)

#if NO_SQC_STORE
	s_cmp_eq_u32	ttmp13, 0x20						//have 32 VGPR lanes filled?
	s_cbranch_scc0	L_SAVE_SGPR_SKIP_TCP_STORE

	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
	s_add_u32	s_save_mem_offset, s_save_mem_offset, 0x80
	s_mov_b32	ttmp13, 0x0
	v_mov_b32	v2, 0x0
L_SAVE_SGPR_SKIP_TCP_STORE:
#endif

	s_add_u32	m0, m0, 16						//next sgpr index
	s_cmp_lt_u32	m0, 96							//scc = (m0 < first 96 SGPRs) ? 1 : 0
	s_cbranch_scc1	L_SAVE_SGPR_LOOP					//first 96 SGPR save is complete?

	//save the remaining 12 SGPRs
	s_movrels_b64	s0, s0							//s0 = s[0+m0], s1 = s[1+m0]
	s_movrels_b64	s2, s2							//s2 = s[2+m0], s3 = s[3+m0]
	s_movrels_b64	s4, s4							//s4 = s[4+m0], s5 = s[5+m0]
	s_movrels_b64	s6, s6							//s6 = s[6+m0], s7 = s[7+m0]
	s_movrels_b64	s8, s8							//s8 = s[8+m0], s9 = s[9+m0]
	s_movrels_b64	s10, s10						//s10 = s[10+m0], s11 = s[11+m0]
	write_12sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)

#if NO_SQC_STORE
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
#else
	// restore s_save_buf_rsrc0,1
	s_mov_b32	s_save_buf_rsrc0, s_save_xnack_mask
#endif

	/* save LDS */

L_SAVE_LDS:
	// Change EXEC to all threads...
	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_ENABLE_SAVE_LDS_EXEC_HI
	s_mov_b32	exec_hi, 0x00000000
	s_branch	L_SAVE_LDS_NORMAL
L_ENABLE_SAVE_LDS_EXEC_HI:
	s_mov_b32	exec_hi, 0xFFFFFFFF
L_SAVE_LDS_NORMAL:
	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//lds_size is zero?
	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_LDS_DONE

	s_barrier								//LDS is used? wait for other waves in the same TG
	s_and_b32	s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
	s_cbranch_scc0	L_SAVE_LDS_DONE

	// first wave does the LDS save;

	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 6			//LDS size in dwords = lds_size * 64dw
	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 2			//LDS size in bytes
	s_mov_b32	s_save_buf_rsrc2, s_save_alloc_size			//NUM_RECORDS in bytes
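	// e.g. lds_size = 0x10 (16 allocation units) -> 16 * 64 = 1024 dwords
	// = 4096 bytes of LDS to save.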

	// LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
	//
	get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
	get_svgpr_size_bytes(s_save_tmp)
	s_add_u32	s_save_mem_offset, s_save_mem_offset, s_save_tmp
	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
	s_add_u32	s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()

	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	//compute each lane's LDS byte address (lane_id * 4) into v0
	v_mbcnt_lo_u32_b32	v0, -1, 0
	v_mbcnt_hi_u32_b32	v0, -1, v0
	v_mul_u32_u24	v0, 4, v0
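	// e.g. lane 0 -> v0 = 0, lane 1 -> v0 = 4, lane 63 -> v0 = 252:
	// each active lane moves one dword of LDS per loop iteration below.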

	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_mov_b32	m0, 0x0
	s_cbranch_scc1	L_SAVE_LDS_W64

L_SAVE_LDS_W32:
	s_mov_b32	s3, 128
	s_nop		0
	s_nop		0
	s_nop		0
L_SAVE_LDS_LOOP_W32:
	ds_read_b32	v1, v0
	s_waitcnt	0
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1

	s_add_u32	m0, m0, s3						//every store covers 128 bytes in wave32
	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
	v_add_nc_u32	v0, v0, 128						//mem offset increased by 128 bytes
	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_SAVE_LDS_LOOP_W32					//LDS save is complete?

	s_branch	L_SAVE_LDS_DONE

L_SAVE_LDS_W64:
	s_mov_b32	s3, 256
	s_nop		0
	s_nop		0
	s_nop		0
L_SAVE_LDS_LOOP_W64:
	ds_read_b32	v1, v0
	s_waitcnt	0
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1

	s_add_u32	m0, m0, s3						//every store covers 256 bytes in wave64
	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
	v_add_nc_u32	v0, v0, 256						//mem offset increased by 256 bytes
	s_cmp_lt_u32	m0, s_save_alloc_size					//scc=(m0 < s_save_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_SAVE_LDS_LOOP_W64					//LDS save is complete?

L_SAVE_LDS_DONE:
	/* save VGPRs - save the remaining VGPRs */
L_SAVE_VGPR:
	// VGPR SR memory offset: 0
	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_ENABLE_SAVE_VGPR_EXEC_HI
	s_mov_b32	s_save_mem_offset, (0+128*4)				// for the remaining VGPRs
	s_mov_b32	exec_hi, 0x00000000
	s_branch	L_SAVE_VGPR_NORMAL
L_ENABLE_SAVE_VGPR_EXEC_HI:
	s_mov_b32	s_save_mem_offset, (0+256*4)				// for the remaining VGPRs
	s_mov_b32	exec_hi, 0xFFFFFFFF
L_SAVE_VGPR_NORMAL:
	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
	s_add_u32	s_save_alloc_size, s_save_alloc_size, 1
	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 2			//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
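	// e.g. vgpr_size = 7 -> (7 + 1) * 4 = 32 VGPRs allocated to this wave.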
	//determine it is wave32 or wave64
	s_lshr_b32	m0, s_wave_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_SAVE_VGPR_WAVE64

	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR Allocated in 4-GPR granularity

	// VGPR store using dw burst
	s_mov_b32	m0, 0x4							//VGPR initial index value =4
	s_cmp_lt_u32	m0, s_save_alloc_size
	s_cbranch_scc0	L_SAVE_VGPR_END

L_SAVE_VGPR_W32_LOOP:
	v_movrels_b32	v0, v0							//v0 = v[0+m0]
	v_movrels_b32	v1, v1							//v1 = v[1+m0]
	v_movrels_b32	v2, v2							//v2 = v[2+m0]
	v_movrels_b32	v3, v3							//v3 = v[3+m0]

	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3

	s_add_u32	m0, m0, 4						//next vgpr index
	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128*4		//every buffer_store_dword does 128 bytes
	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_SAVE_VGPR_W32_LOOP					//VGPR save is complete?

	s_branch	L_SAVE_VGPR_END

L_SAVE_VGPR_WAVE64:
	s_mov_b32	s_save_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR store using dw burst
	s_mov_b32	m0, 0x4							//VGPR initial index value =4
	s_cmp_lt_u32	m0, s_save_alloc_size
	s_cbranch_scc0	L_SAVE_SHARED_VGPR

L_SAVE_VGPR_W64_LOOP:
	v_movrels_b32	v0, v0							//v0 = v[0+m0]
	v_movrels_b32	v1, v1							//v1 = v[1+m0]
	v_movrels_b32	v2, v2							//v2 = v[2+m0]
	v_movrels_b32	v3, v3							//v3 = v[3+m0]

	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
	buffer_store_dword	v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
	buffer_store_dword	v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
	buffer_store_dword	v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3

	s_add_u32	m0, m0, 4						//next vgpr index
	s_add_u32	s_save_mem_offset, s_save_mem_offset, 256*4		//every buffer_store_dword does 256 bytes
	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_SAVE_VGPR_W64_LOOP					//VGPR save is complete?

L_SAVE_SHARED_VGPR:
	//The following is the shared VGPR save path (new for gfx10)
	s_getreg_b32	s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
	s_and_b32	s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
	s_cbranch_scc0	L_SAVE_VGPR_END						//no shared_vgpr used? jump to L_SAVE_VGPR_END
	s_lshl_b32	s_save_alloc_size, s_save_alloc_size, 3			//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
	//m0 now holds the normal VGPR count; add the shared_vgpr count to get the total count.
	//the shared_vgpr save starts from index m0
	s_add_u32	s_save_alloc_size, s_save_alloc_size, m0
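	// e.g. shared_vgpr_size = 2 with m0 = 32 normal VGPRs
	// -> 2 * 8 = 16 shared VGPRs, saved at indices 32..47.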
	s_mov_b32	exec_lo, 0xFFFFFFFF
	s_mov_b32	exec_hi, 0x00000000
L_SAVE_SHARED_VGPR_WAVE64_LOOP:
	v_movrels_b32	v0, v0							//v0 = v[0+m0]
	buffer_store_dword	v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
	s_add_u32	m0, m0, 1						//next vgpr index
	s_add_u32	s_save_mem_offset, s_save_mem_offset, 128
	s_cmp_lt_u32	m0, s_save_alloc_size					//scc = (m0 < s_save_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_SAVE_SHARED_VGPR_WAVE64_LOOP				//SHARED_VGPR save is complete?

L_SAVE_VGPR_END:
	s_branch	L_END_PGM

L_RESTORE:
	/* setup Resource Constants */
	s_mov_b32	s_restore_buf_rsrc0, s_restore_spi_init_lo		//base_addr_lo
	s_and_b32	s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF	//base_addr_hi
	s_or_b32	s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
	s_mov_b32	s_restore_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes)
	s_mov_b32	s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC

	//determine it is wave32 or wave64
	get_wave_size(s_restore_size)

	s_and_b32	s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
	s_cbranch_scc0	L_RESTORE_VGPR

	/* restore LDS */
L_RESTORE_LDS:
	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_ENABLE_RESTORE_LDS_EXEC_HI
	s_mov_b32	exec_hi, 0x00000000
	s_branch	L_RESTORE_LDS_NORMAL
L_ENABLE_RESTORE_LDS_EXEC_HI:
	s_mov_b32	exec_hi, 0xFFFFFFFF
L_RESTORE_LDS_NORMAL:
	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//lds_size is zero?
	s_cbranch_scc0	L_RESTORE_VGPR						//no lds used? jump to L_RESTORE_VGPR
	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 6		//LDS size in dwords = lds_size * 64dw
	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 2		//LDS size in bytes
	s_mov_b32	s_restore_buf_rsrc2, s_restore_alloc_size		//NUM_RECORDS in bytes

	// LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
	//
	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
	get_svgpr_size_bytes(s_restore_tmp)
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()

	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_mov_b32	m0, 0x0
	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64

L_RESTORE_LDS_LOOP_W32:
#if HAVE_BUFFER_LDS_LOAD
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// first 64DW
#else
	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
	s_waitcnt	vmcnt(0)
	ds_store_addtid_b32     v0
#endif
	s_add_u32	m0, m0, 128						// 128 bytes per wave32 load
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128		//mem offset increased by 128 bytes
	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W32					//LDS restore is complete?
	s_branch	L_RESTORE_VGPR

L_RESTORE_LDS_LOOP_W64:
#if HAVE_BUFFER_LDS_LOAD
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// first 64DW
#else
	buffer_load_dword       v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
	s_waitcnt	vmcnt(0)
	ds_store_addtid_b32     v0
#endif
	s_add_u32	m0, m0, 256						// 256 bytes per wave64 load
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256		//mem offset increased by 256 bytes
	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc=(m0 < s_restore_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_RESTORE_LDS_LOOP_W64					//LDS restore is complete?

	/* restore VGPRs */
L_RESTORE_VGPR:
	// VGPR SR memory offset : 0
	s_mov_b32	s_restore_mem_offset, 0x0
	s_mov_b32	exec_lo, 0xFFFFFFFF					//need every thread from now on
	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_ENABLE_RESTORE_VGPR_EXEC_HI
	s_mov_b32	exec_hi, 0x00000000
	s_branch	L_RESTORE_VGPR_NORMAL
L_ENABLE_RESTORE_VGPR_EXEC_HI:
	s_mov_b32	exec_hi, 0xFFFFFFFF
L_RESTORE_VGPR_NORMAL:
	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, 1
	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 2		//Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
	//determine it is wave32 or wave64
	s_lshr_b32	m0, s_restore_size, S_WAVE_SIZE
	s_and_b32	m0, m0, 1
	s_cmp_eq_u32	m0, 1
	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64

	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR load using dw burst
	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts at v4; v0-v3 are restored last
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4
	s_mov_b32	m0, 4							//VGPR initial index value = 4
	s_cmp_lt_u32	m0, s_restore_alloc_size
	s_cbranch_scc0	L_RESTORE_SGPR

L_RESTORE_VGPR_WAVE32_LOOP:
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128
	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*2
	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*3
	s_waitcnt	vmcnt(0)
	v_movreld_b32	v0, v0							//v[0+m0] = v0
	v_movreld_b32	v1, v1
	v_movreld_b32	v2, v2
	v_movreld_b32	v3, v3
	s_add_u32	m0, m0, 4						//next vgpr index
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128*4	//every buffer_load_dword does 128 bytes
	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_RESTORE_VGPR_WAVE32_LOOP				//VGPR restore (except v0-v3) is complete?

	/* VGPR restore on v0-v3 */
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
	s_waitcnt	vmcnt(0)

	s_branch	L_RESTORE_SGPR

L_RESTORE_VGPR_WAVE64:
	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	// VGPR load using dw burst
	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts at v4; v0-v3 are restored last
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4
	s_mov_b32	m0, 4							//VGPR initial index value = 4
	s_cmp_lt_u32	m0, s_restore_alloc_size
	s_cbranch_scc0	L_RESTORE_SHARED_VGPR

L_RESTORE_VGPR_WAVE64_LOOP:
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
	s_waitcnt	vmcnt(0)
	v_movreld_b32	v0, v0							//v[0+m0] = v0
	v_movreld_b32	v1, v1
	v_movreld_b32	v2, v2
	v_movreld_b32	v3, v3
	s_add_u32	m0, m0, 4						//next vgpr index
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256*4	//every buffer_load_dword does 256 bytes
	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_RESTORE_VGPR_WAVE64_LOOP				//VGPR restore (except v0-v3) is complete?

L_RESTORE_SHARED_VGPR:
	//The following is the shared VGPR restore path (new for gfx10)
	s_getreg_b32	s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)	//shared_vgpr_size
	s_and_b32	s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF	//shared_vgpr_size is zero?
	s_cbranch_scc0	L_RESTORE_V0						//no shared_vgpr used?
	s_lshl_b32	s_restore_alloc_size, s_restore_alloc_size, 3		//Number of SHARED_VGPRs = shared_vgpr_size * 8    (non-zero value)
	//m0 now holds the normal VGPR count; add the shared_vgpr count to get the total count.
	//the shared_vgpr restore starts from index m0
	s_add_u32	s_restore_alloc_size, s_restore_alloc_size, m0
	s_mov_b32	exec_lo, 0xFFFFFFFF
	s_mov_b32	exec_hi, 0x00000000
L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
	s_waitcnt	vmcnt(0)
	v_movreld_b32	v0, v0							//v[0+m0] = v0
	s_add_u32	m0, m0, 1						//next vgpr index
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128
	s_cmp_lt_u32	m0, s_restore_alloc_size				//scc = (m0 < s_restore_alloc_size) ? 1 : 0
	s_cbranch_scc1	L_RESTORE_SHARED_VGPR_WAVE64_LOOP			//SHARED_VGPR restore is complete?

	s_mov_b32	exec_hi, 0xFFFFFFFF					//restore exec_hi back before restoring v0!!

	/* VGPR restore on v0-v3 */
L_RESTORE_V0:
	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
	buffer_load_dword	v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
	buffer_load_dword	v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
	buffer_load_dword	v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
	s_waitcnt	vmcnt(0)

	/* restore SGPRs */
	//restored as 4+8+16*6 = 108 SGPRs
	// SGPR SR memory offset : size(VGPR)+size(SVGPR)
L_RESTORE_SGPR:
	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
	get_svgpr_size_bytes(s_restore_tmp)
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 are not saved
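	// Arithmetic check of the offset above: get_sgpr_size_bytes() reserves
	// 128 SGPRs (512 bytes) but only s_sgpr_save_num = 108 are saved, so
	// backing up 20 * 4 bytes lands at the end of the saved block
	// (108 * 4 = 432 bytes).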

	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	s_mov_b32	m0, s_sgpr_save_num

	read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
	s_waitcnt	lgkmcnt(0)

	s_sub_u32	m0, m0, 4						// m0 = 104; restore s[104:107] first
	s_nop		0							// hazard SALU M0=> S_MOVREL

	s_movreld_b64	s0, s0							//s[0+m0] = s0
	s_movreld_b64	s2, s2

	read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
	s_waitcnt	lgkmcnt(0)

	s_sub_u32	m0, m0, 8						// m0 = 96; restore s[96:103]
	s_nop		0							// hazard SALU M0=> S_MOVREL

	s_movreld_b64	s0, s0							//s[0+m0] = s0
	s_movreld_b64	s2, s2
	s_movreld_b64	s4, s4
	s_movreld_b64	s6, s6

L_RESTORE_SGPR_LOOP:
	read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
	s_waitcnt	lgkmcnt(0)

	s_sub_u32	m0, m0, 16						// restore s[m0:m0+15], looping down to s[0]
	s_nop		0							// hazard SALU M0=> S_MOVREL

	s_movreld_b64	s0, s0							//s[0+m0] = s0
	s_movreld_b64	s2, s2
	s_movreld_b64	s4, s4
	s_movreld_b64	s6, s6
	s_movreld_b64	s8, s8
	s_movreld_b64	s10, s10
	s_movreld_b64	s12, s12
	s_movreld_b64	s14, s14

	s_cmp_eq_u32	m0, 0							//scc = (m0 == 0) ? 1 : 0
	s_cbranch_scc0	L_RESTORE_SGPR_LOOP

	// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
	// Clear DEBUG_EN before and restore MODE after the barrier.
	s_setreg_imm32_b32	hwreg(HW_REG_MODE), 0
	s_barrier								//barrier to ensure LDS is ready before access attempts from any other wave in the same TG

	/* restore HW registers */
L_RESTORE_HWREG:
	// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
	get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
	get_svgpr_size_bytes(s_restore_tmp)
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()

	s_mov_b32	s_restore_buf_rsrc2, 0x1000000				//NUM_RECORDS in bytes

	read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
	s_waitcnt	lgkmcnt(0)

	s_setreg_b32	hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO), s_restore_flat_scratch

	read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
	s_waitcnt	lgkmcnt(0)						//from now on, it is safe to restore STATUS and IB_STS

	s_setreg_b32	hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch

	s_mov_b32	m0, s_restore_m0
	s_mov_b32	exec_lo, s_restore_exec_lo
	s_mov_b32	exec_hi, s_restore_exec_hi

	s_and_b32	s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
	s_setreg_b32	hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0

#if HAVE_XNACK
	s_setreg_b32	hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
#endif

	s_and_b32	s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
	s_lshr_b32	s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
	s_setreg_b32	hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
	s_setreg_b32	hwreg(HW_REG_MODE), s_restore_mode

	// Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
	// ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
	get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
	get_svgpr_size_bytes(s_restore_ttmps_hi)
	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
	s_add_u32	s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
	s_addc_u32	s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
	s_and_b32	s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
	s_load_dwordx4	[ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 glc:1
	s_load_dwordx4	[ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 glc:1
	s_load_dword	ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
	s_waitcnt	lgkmcnt(0)

#if HAVE_XNACK
	restore_ib_sts(s_restore_tmp, s_restore_m0)
#endif

	s_and_b32	s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff		//pc[47:32] //Do it here in order not to affect STATUS
	s_and_b64	exec, exec, exec					// Restore STATUS.EXECZ, not writable by s_setreg_b32
	s_and_b64	vcc, vcc, vcc						// Restore STATUS.VCCZ, not writable by s_setreg_b32

#if SW_SA_TRAP
	// If traps are enabled then return to the shader with PRIV=0.
	// Otherwise retain PRIV=1 for subsequent context save requests.
	s_getreg_b32	s_restore_tmp, hwreg(HW_REG_STATUS)
	s_bitcmp1_b32	s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT
	s_cbranch_scc1	L_RETURN_WITHOUT_PRIV

	s_setreg_b32	hwreg(HW_REG_STATUS), s_restore_status			// SCC is included; it was changed by the preceding SALU instructions
	s_setpc_b64	[s_restore_pc_lo, s_restore_pc_hi]
L_RETURN_WITHOUT_PRIV:
#endif

	s_setreg_b32	hwreg(HW_REG_STATUS), s_restore_status			// SCC is included; it was changed by the preceding SALU instructions
	s_rfe_b64	s_restore_pc_lo						//Return to the main shader program and resume execution

L_END_PGM:
	s_endpgm
end

function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
#if NO_SQC_STORE
	// Copy into VGPR for later TCP store.
	v_writelane_b32	v2, s, m0
	s_add_u32	m0, m0, 0x1
#else
	s_mov_b32	exec_lo, m0
	s_mov_b32	m0, s_mem_offset
	s_buffer_store_dword	s, s_rsrc, m0 glc:1
	s_add_u32	s_mem_offset, s_mem_offset, 4
	s_mov_b32	m0, exec_lo
#endif
end


function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
#if NO_SQC_STORE
	// Copy into VGPR for later TCP store.
	for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
		v_writelane_b32	v2, s[sgpr_idx], ttmp13
		s_add_u32	ttmp13, ttmp13, 0x1
	end
#else
	s_buffer_store_dwordx4	s[0], s_rsrc, 0 glc:1
	s_buffer_store_dwordx4	s[4], s_rsrc, 16 glc:1
	s_buffer_store_dwordx4	s[8], s_rsrc, 32 glc:1
	s_buffer_store_dwordx4	s[12], s_rsrc, 48 glc:1
	s_add_u32	s_rsrc[0], s_rsrc[0], 4*16
	s_addc_u32	s_rsrc[1], s_rsrc[1], 0x0
#endif
end

function write_12sgpr_to_mem(s, s_rsrc, s_mem_offset)
#if NO_SQC_STORE
	// Copy into VGPR for later TCP store.
	for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
		v_writelane_b32	v2, s[sgpr_idx], ttmp13
		s_add_u32	ttmp13, ttmp13, 0x1
	end
#else
	s_buffer_store_dwordx4	s[0], s_rsrc, 0 glc:1
	s_buffer_store_dwordx4	s[4], s_rsrc, 16 glc:1
	s_buffer_store_dwordx4	s[8], s_rsrc, 32 glc:1
	s_add_u32	s_rsrc[0], s_rsrc[0], 4*12
	s_addc_u32	s_rsrc[1], s_rsrc[1], 0x0
#endif
end

function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
	s_buffer_load_dword	s, s_rsrc, s_mem_offset glc:1
	s_add_u32	s_mem_offset, s_mem_offset, 4
end

function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
	s_sub_u32	s_mem_offset, s_mem_offset, 4*16
	s_buffer_load_dwordx16	s, s_rsrc, s_mem_offset glc:1
end

function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
	s_sub_u32	s_mem_offset, s_mem_offset, 4*8
	s_buffer_load_dwordx8	s, s_rsrc, s_mem_offset glc:1
end

function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
	s_sub_u32	s_mem_offset, s_mem_offset, 4*4
	s_buffer_load_dwordx4	s, s_rsrc, s_mem_offset glc:1
end


function get_lds_size_bytes(s_lds_size_byte)
	s_getreg_b32	s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
	s_lshl_b32	s_lds_size_byte, s_lds_size_byte, 8			//LDS size in bytes = lds_size * 64 dwords * 4 bytes (granularity 64DW)
end

function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
	s_getreg_b32	s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
	s_add_u32	s_vgpr_size_byte, s_vgpr_size_byte, 1
	s_bitcmp1_b32	s_size, S_WAVE_SIZE
	s_cbranch_scc1	L_ENABLE_SHIFT_W64
	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)		//VGPR bytes = (vgpr_size + 1) * 4 * 32 * 4   (non-zero value)
	s_branch	L_SHIFT_DONE
L_ENABLE_SHIFT_W64:
	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)		//VGPR bytes = (vgpr_size + 1) * 4 * 64 * 4   (non-zero value)
L_SHIFT_DONE:
end
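
// e.g. wave32 with vgpr_size = 7: (7 + 1) << 9 = 4096 bytes,
// i.e. 32 VGPRs * 32 lanes * 4 bytes.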

function get_svgpr_size_bytes(s_svgpr_size_byte)
	s_getreg_b32	s_svgpr_size_byte, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
	s_lshl_b32	s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
end
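
// e.g. shared_vgpr_size = 2: 2 << 10 = 2048 bytes,
// i.e. 16 shared VGPRs * 32 lanes * 4 bytes.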

function get_sgpr_size_bytes
	return 512
end

function get_hwreg_size_bytes
	return 128
end
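
// Note on the fixed sizes above: 512 bytes covers the full 128-SGPR file
// (128 * 4 bytes, though only 108 SGPRs are saved), and 128 bytes covers the
// HWREG block (16 dwords of HWREGs followed by the saved ttmps at +0x40).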

function get_wave_size(s_reg)
	s_getreg_b32	s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
	s_lshl_b32	s_reg, s_reg, S_WAVE_SIZE
end
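
// e.g. a wave64 wave reads IB_STS2.WAVE64 = 1 and returns s_reg with bit
// S_WAVE_SIZE (25) set; callers shift that bit back down to pick the
// wave64 code paths.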

function save_and_clear_ib_sts(tmp1, tmp2)
	// Preserve and clear scalar XNACK state before issuing scalar loads.
	// Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
	// unused space ttmp11[31:24].
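	// Resulting mapping (from the shifts below): IB_STS[25] -> ttmp11[31],
	// and IB_STS[21:15] (RCNT, FIRST_REPLAY) -> ttmp11[30:24].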
	s_andn2_b32	ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
	s_getreg_b32	tmp1, hwreg(HW_REG_IB_STS)
	s_and_b32	tmp2, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
	s_lshl_b32	tmp2, tmp2, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
	s_or_b32	ttmp11, ttmp11, tmp2
	s_and_b32	tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
	s_lshl_b32	tmp2, tmp2, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
	s_or_b32	ttmp11, ttmp11, tmp2
	s_andn2_b32	tmp1, tmp1, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
	s_setreg_b32	hwreg(HW_REG_IB_STS), tmp1
end

function restore_ib_sts(tmp1, tmp2)
	s_lshr_b32	tmp1, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
	s_and_b32	tmp2, tmp1, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
	s_lshr_b32	tmp1, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
	s_and_b32	tmp1, tmp1, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
	s_or_b32	tmp1, tmp1, tmp2
	s_setreg_b32	hwreg(HW_REG_IB_STS), tmp1
end