1/*
2 * Copyright 2015-2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/* To compile this assembly code:
24 * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
25 */
26
27/* HW (VI) source code for CWSR trap handler */
28/* Version 18 + multiple trap handler */
29
30// this performance-optimal version was originally from Seven Xu at SRDC
31
// Revision #18   --...
33/* Rev History
** #1. Branch from gc dv.   //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(merged, skipped - already fixed by PV)
35** #4. SR Memory Layout:
36**             1. VGPR-SGPR-HWREG-{LDS}
37**             2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
38** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
39** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
40** #7. Update: 1. don't barrier if noLDS
41** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
42**             2. Fix SQ issue by s_sleep 2
43** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
44**             2. optimize s_buffer save by burst 16sgprs...
** #10. Update 1. Optimize restore sgpr by burst 16 sgprs.
46** #11. Update 1. Add 2 more timestamp for debug version
47** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
48** #13. Integ  1. Always use MUBUF for PV trap shader...
49** #14. Update 1. s_buffer_store soft clause...
** #15. Update 1. PERF - scalar write with glc:0/mtype0 to allow L2 combine. Large performance improvement.
** #16. Update 1. PERF - UNROLL LDS_DMA got 2500-cycle saving in IP tree
52** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
53**             2. PERF - Save LDS before save VGPR to cover LDS save long latency...
** #18. Update 1. FUNC - Implicitly restore STATUS.VCCZ, which is not writable by s_setreg_b32
55**             2. FUNC - Handle non-CWSR traps
56*/
57
// Byte offsets of the HWREG and SGPR areas within the wave's SR work memory.
var G8SR_WDMEM_HWREG_OFFSET = 0
var G8SR_WDMEM_SGPR_OFFSET  = 128  // in bytes

// Keep these definitions identical to the app shader: the timestamp SGPR pairs
// below belong to the app shader, and must be taken before any save work and
// after any restore work.

var G8SR_DEBUG_TIMESTAMP = 0            // assemble-time switch: 1 = emit s_memrealtime debug timestamps
var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4  // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
var s_g8sr_ts_save_s    = s[34:35]   // save start
var s_g8sr_ts_sq_save_msg  = s[36:37]   // the save shader sends the SAVEWAVE msg to SPI
var s_g8sr_ts_spi_wrexec   = s[38:39]   // the SPI writes the sr address to SQ
var s_g8sr_ts_save_d    = s[40:41]   // save end
var s_g8sr_ts_restore_s = s[42:43]   // restore start
var s_g8sr_ts_restore_d = s[44:45]   // restore end

// Assemble-time switch: 1 = save/restore VGPRs with dwordx4 buffer ops instead of single-dword ops.
var G8SR_VGPR_SR_IN_DWX4 = 0
var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000    // DWx4 stride is 4*4Bytes
var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
75
76
77/*************************************************************************/
78/*                  control on how to run the shader                     */
79/*************************************************************************/
80//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
// Assemble-time configuration switches. All EMU_/SIM_ flags are 0 for the
// production (hardware) build; they exist only to run this shader in
// emulation/simulation where SPI save-restore support may be missing.
var EMU_RUN_HACK                    =   0
var EMU_RUN_HACK_RESTORE_NORMAL     =   0
var EMU_RUN_HACK_SAVE_NORMAL_EXIT   =   0
var EMU_RUN_HACK_SAVE_SINGLE_WAVE   =   0
var EMU_RUN_HACK_SAVE_FIRST_TIME    =   0                   //for interrupted restore in which the first save is through EMU_RUN_HACK
var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO =   0                   //for interrupted restore in which the first save is through EMU_RUN_HACK
var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI =   0                   //for interrupted restore in which the first save is through EMU_RUN_HACK
var SAVE_LDS                        =   1
var WG_BASE_ADDR_LO                 =   0x9000a000          // workgroup state memory base (EMU-hack address space)
var WG_BASE_ADDR_HI                 =   0x0
var WAVE_SPACE                      =   0x5000              //memory size that each wave occupies in workgroup state mem
var CTX_SAVE_CONTROL                =   0x0
var CTX_RESTORE_CONTROL             =   CTX_SAVE_CONTROL
var SIM_RUN_HACK                    =   0                   //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
var SGPR_SAVE_USE_SQC               =   1                   //use SQC D$ to do the write
var USE_MTBUF_INSTEAD_OF_MUBUF      =   0                   //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
var SWIZZLE_EN                      =   0                   //whether we use swizzled buffer addressing
98
99/**************************************************************************/
100/*                      variables                                         */
101/**************************************************************************/
// Bit-field positions/sizes within the SQ per-wave hardware registers
// (STATUS, LDS_ALLOC, GPR_ALLOC, TRAPSTS, IB_STS) and within buffer
// resource descriptor words, as used by s_getreg_b32/s_setreg_b32 below.
var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
// PRE/POST ranges bracket the SPI_PRIO field: the bits below it and above it,
// used to rewrite STATUS without touching SPI_PRIO.
var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT   = 0
var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE    = 1
var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT  = 3
var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE   = 29

var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT    = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE     = 9
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT   = 8
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE    = 6
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT   = 24
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE    = 3                     //FIXME  sq.blk still has 4 bits at this time while SQ programming guide has 3 bits

var SQ_WAVE_TRAPSTS_SAVECTX_MASK    =   0x400
var SQ_WAVE_TRAPSTS_EXCE_MASK       =   0x1FF                   // Exception mask
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT   =   10
var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK   =   0x100
var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT  =   8
// PRE/POST ranges bracket the SAVECTX bit (bit 10) of TRAPSTS.
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK    =   0x3FF
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT   =   0x0
var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE    =   10
var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK   =   0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT  =   11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE   =   21

// IB_STS replay state (RCNT / FIRST_REPLAY) — stashed into spare PC_HI bits
// during save so the memory-replay state survives the context switch.
var SQ_WAVE_IB_STS_RCNT_SHIFT           =   16                  //FIXME
var SQ_WAVE_IB_STS_RCNT_SIZE            =   4                   //FIXME
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT   =   15                  //FIXME
var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE    =   1                   //FIXME
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG   = 0x00007FFF    //FIXME

var SQ_BUF_RSRC_WORD1_ATC_SHIFT     =   24
var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT   =   27
138
139
140/*      Save        */
/*      Save        */
// Buffer resource descriptor constants used while writing wave state out.
var S_SAVE_BUF_RSRC_WORD1_STRIDE        =   0x00040000          //stride is 4 bytes
var S_SAVE_BUF_RSRC_WORD3_MISC          =   0x00807FAC          //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE

// Fields SPI packs into the second init word (delivered via exec_hi, see below).
var S_SAVE_SPI_INIT_ATC_MASK            =   0x08000000          //bit[27]: ATC bit
var S_SAVE_SPI_INIT_ATC_SHIFT           =   27
var S_SAVE_SPI_INIT_MTYPE_MASK          =   0x70000000          //bit[30:28]: Mtype
var S_SAVE_SPI_INIT_MTYPE_SHIFT         =   28
var S_SAVE_SPI_INIT_FIRST_WAVE_MASK     =   0x04000000          //bit[26]: FirstWaveInTG
var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT    =   26

// Spare high bits of saved PC_HI reused to stash IB_STS replay state.
var S_SAVE_PC_HI_RCNT_SHIFT             =   28                  //FIXME  check with Brian to ensure all fields other than PC[47:0] can be used
var S_SAVE_PC_HI_RCNT_MASK              =   0xF0000000          //FIXME
var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT     =   27                  //FIXME
var S_SAVE_PC_HI_FIRST_REPLAY_MASK      =   0x08000000          //FIXME

// SPI writes the save-area base address into EXEC as the "go" signal.
var s_save_spi_init_lo              =   exec_lo
var s_save_spi_init_hi              =   exec_hi

                                                //tba_lo and tba_hi need to be saved/restored
// Register aliases for the save path. TTMPs are trap-temporary SGPRs reserved
// for the trap handler; several aliases deliberately share registers — see the
// "conflict" notes for which pairs must not be live at the same time.
var s_save_pc_lo            =   ttmp0           //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
var s_save_pc_hi            =   ttmp1
var s_save_exec_lo          =   ttmp2
var s_save_exec_hi          =   ttmp3
var s_save_status           =   ttmp4
var s_save_trapsts          =   ttmp5           //not really used until the end of the SAVE routine
var s_save_xnack_mask_lo    =   ttmp6
var s_save_xnack_mask_hi    =   ttmp7
var s_save_buf_rsrc0        =   ttmp8
var s_save_buf_rsrc1        =   ttmp9
var s_save_buf_rsrc2        =   ttmp10
var s_save_buf_rsrc3        =   ttmp11

var s_save_mem_offset       =   tma_lo
var s_save_alloc_size       =   s_save_trapsts          //conflict
var s_save_tmp              =   s_save_buf_rsrc2        //shared with s_save_buf_rsrc2  (conflict: should not use mem access with s_save_tmp at the same time)
var s_save_m0               =   tma_hi
177
178/*      Restore     */
/*      Restore     */
// Restore-path constants mirror the save-path values so the buffer descriptor
// and SPI init word are decoded identically in both directions.
var S_RESTORE_BUF_RSRC_WORD1_STRIDE         =   S_SAVE_BUF_RSRC_WORD1_STRIDE
var S_RESTORE_BUF_RSRC_WORD3_MISC           =   S_SAVE_BUF_RSRC_WORD3_MISC

var S_RESTORE_SPI_INIT_ATC_MASK             =   0x08000000          //bit[27]: ATC bit
var S_RESTORE_SPI_INIT_ATC_SHIFT            =   27
var S_RESTORE_SPI_INIT_MTYPE_MASK           =   0x70000000          //bit[30:28]: Mtype
var S_RESTORE_SPI_INIT_MTYPE_SHIFT          =   28
var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK      =   0x04000000          //bit[26]: FirstWaveInTG
var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT     =   26

var S_RESTORE_PC_HI_RCNT_SHIFT              =   S_SAVE_PC_HI_RCNT_SHIFT
var S_RESTORE_PC_HI_RCNT_MASK               =   S_SAVE_PC_HI_RCNT_MASK
var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT      =   S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
var S_RESTORE_PC_HI_FIRST_REPLAY_MASK       =   S_SAVE_PC_HI_FIRST_REPLAY_MASK

// SPI delivers the restore-area base address via EXEC, as on the save path.
var s_restore_spi_init_lo                   =   exec_lo
var s_restore_spi_init_hi                   =   exec_hi

// Register aliases for the restore path. Some aliases share registers ("no
// conflict" notes mark pairs whose live ranges do not overlap).
var s_restore_mem_offset        =   ttmp2
var s_restore_alloc_size        =   ttmp3
var s_restore_tmp               =   ttmp6               //tba_lo/hi need to be restored
var s_restore_mem_offset_save   =   s_restore_tmp       //no conflict

var s_restore_m0            =   s_restore_alloc_size    //no conflict

var s_restore_mode          =   ttmp7

var s_restore_pc_lo         =   ttmp0
var s_restore_pc_hi         =   ttmp1
var s_restore_exec_lo       =   tma_lo                  //no conflict
var s_restore_exec_hi       =   tma_hi                  //no conflict
var s_restore_status        =   ttmp4
var s_restore_trapsts       =   ttmp5
var s_restore_xnack_mask_lo =   xnack_mask_lo
var s_restore_xnack_mask_hi =   xnack_mask_hi
var s_restore_buf_rsrc0     =   ttmp8
var s_restore_buf_rsrc1     =   ttmp9
var s_restore_buf_rsrc2     =   ttmp10
var s_restore_buf_rsrc3     =   ttmp11
218
219/**************************************************************************/
220/*                      trap handler entry points                         */
221/**************************************************************************/
222/* Shader Main*/
223
224shader main
225  asic(VI)
226  type(CS)
227
228
    // Trap entry dispatch: decide whether this trap is a CWSR context save,
    // a chained second-level trap, an exception, or an s_trap to step over.
    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))                   //hack to use trap_id for determining save/restore
        //FIXME VCCZ un-init assertion s_getreg_b32     s_save_status, hwreg(HW_REG_STATUS)         //save STATUS since we will change SCC
        s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000              //change SCC
        s_cmp_eq_u32 s_save_tmp, 0x007e0000                         //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
        s_cbranch_scc0 L_JUMP_TO_RESTORE                            //do not need to recover STATUS here  since we are going to RESTORE
        //FIXME  s_setreg_b32   hwreg(HW_REG_STATUS),   s_save_status       //need to recover STATUS since we are going to SAVE
        s_branch L_SKIP_RESTORE                                     //NOT restore, SAVE actually
    else
        s_branch L_SKIP_RESTORE                                     //NOT restore. might be a regular trap or save
    end

L_JUMP_TO_RESTORE:
    s_branch L_RESTORE                                              //restore

L_SKIP_RESTORE:

    s_getreg_b32    s_save_status, hwreg(HW_REG_STATUS)                             //save STATUS since we will change SCC
    s_andn2_b32     s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK      //clear SPI_PRIO from the saved STATUS
    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
    s_and_b32       s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK    //check whether this is for save
    s_cbranch_scc1  L_SAVE                                      //SCC set by the s_and above: SAVECTX bit set -> CWSR save

    // *********    Handle non-CWSR traps       *******************
if (!EMU_RUN_HACK)
    /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
    s_load_dwordx4  [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
    s_waitcnt lgkmcnt(0)
    s_or_b32        ttmp7, ttmp8, ttmp9     //SCC = (second-level handler address != 0)
    s_cbranch_scc0  L_NO_NEXT_TRAP //next level trap handler not been set
    set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC); macro defined elsewhere in this file — not visible in this chunk
    s_setpc_b64     [ttmp8,ttmp9] //jump to next level trap handler

L_NO_NEXT_TRAP:
    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
    s_and_b32       s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
    s_cbranch_scc1  L_EXCP_CASE   // Exception: return to the faulting PC without the +4 skip
    s_add_u32       ttmp0, ttmp0, 4   // S_TRAP case, add 4 to ttmp0 so we resume after the s_trap instruction
    s_addc_u32  ttmp1, ttmp1, 0       // propagate the carry into PC[47:32]
L_EXCP_CASE:
    s_and_b32   ttmp1, ttmp1, 0xFFFF  // keep only PC[47:32]; drop trapID/HT/rewind bits packed in ttmp1
    set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
    s_rfe_b64       [ttmp0, ttmp1]    // return from exception to {ttmp1,ttmp0}
end
    // *********        End handling of non-CWSR traps   *******************
273
274/**************************************************************************/
275/*                      save routine                                      */
276/**************************************************************************/
277
// CWSR save path: snapshot volatile wave state (XNACK, IB_STS replay bits),
// then handshake with SPI — EXEC is cleared and SPI writes the save-area
// address into EXEC as the "go" signal.
L_SAVE:

if G8SR_DEBUG_TIMESTAMP
        s_memrealtime   s_g8sr_ts_save_s
        s_waitcnt lgkmcnt(0)         //FIXME, will cause xnack??
end

    s_mov_b32       s_save_tmp, 0                                                           //clear saveCtx bit
    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp     //clear saveCtx bit

    s_mov_b32       s_save_xnack_mask_lo,   xnack_mask_lo                                   //save XNACK_MASK
    s_mov_b32       s_save_xnack_mask_hi,   xnack_mask_hi    //save XNACK must before any memory operation
    // Stash IB_STS.RCNT and IB_STS.FIRST_REPLAY into spare high bits of the saved PC_HI.
    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)                   //save RCNT
    s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
    s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)   //save FIRST_REPLAY
    s_lshl_b32      s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
    s_or_b32        s_save_pc_hi, s_save_pc_hi, s_save_tmp
    s_getreg_b32    s_save_tmp, hwreg(HW_REG_IB_STS)                                        //clear RCNT and FIRST_REPLAY in IB_STS
    s_and_b32       s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG

    s_setreg_b32    hwreg(HW_REG_IB_STS), s_save_tmp

    /*      inform SPI the readiness and wait for SPI's go signal */
    s_mov_b32       s_save_exec_lo, exec_lo                                                 //save EXEC and use EXEC for the go signal from SPI
    s_mov_b32       s_save_exec_hi, exec_hi
    s_mov_b64       exec,   0x0                                                             //clear EXEC to get ready to receive

if G8SR_DEBUG_TIMESTAMP
        s_memrealtime  s_g8sr_ts_sq_save_msg
        s_waitcnt lgkmcnt(0)
end

    if (EMU_RUN_HACK)
        // no SPI in EMU — skip the handshake entirely
    else
        s_sendmsg   sendmsg(MSG_SAVEWAVE)  //send SPI a message and wait for SPI's write to EXEC
    end

    // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
    s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
    s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp

  L_SLEEP:
    s_sleep 0x2                // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0

    if (EMU_RUN_HACK)
        // EMU: fall through without waiting for SPI
    else
        s_cbranch_execz L_SLEEP           // spin until SPI writes a non-zero address into EXEC
    end

if G8SR_DEBUG_TIMESTAMP
        s_memrealtime  s_g8sr_ts_spi_wrexec
        s_waitcnt lgkmcnt(0)
end
334
    /*      setup Resource Constants    */
    // Build the MUBUF buffer resource descriptor (rsrc0..3) from the SPI init
    // words now sitting in EXEC: base address, stride, ATC and MTYPE bits.
    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
        //calculate wd_addr using absolute thread id
        v_readlane_b32 s_save_tmp, v9, 0
        s_lshr_b32 s_save_tmp, s_save_tmp, 6
        s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
        s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
        s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
        s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
    else
    end
    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
        s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
        s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
        s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
    else
    end


    s_mov_b32       s_save_buf_rsrc0,   s_save_spi_init_lo                                                      //base_addr_lo
    s_and_b32       s_save_buf_rsrc1,   s_save_spi_init_hi, 0x0000FFFF                                          //base_addr_hi
    s_or_b32        s_save_buf_rsrc1,   s_save_buf_rsrc1,  S_SAVE_BUF_RSRC_WORD1_STRIDE
    s_mov_b32       s_save_buf_rsrc2,   0                                                                       //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited
    s_mov_b32       s_save_buf_rsrc3,   S_SAVE_BUF_RSRC_WORD3_MISC
    s_and_b32       s_save_tmp,         s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
    s_lshr_b32      s_save_tmp,         s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)         //get ATC bit into position
    s_or_b32        s_save_buf_rsrc3,   s_save_buf_rsrc3,  s_save_tmp                                           //or ATC
    s_and_b32       s_save_tmp,         s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
    s_lshr_b32      s_save_tmp,         s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)     //get MTYPE bits into position
    s_or_b32        s_save_buf_rsrc3,   s_save_buf_rsrc3,  s_save_tmp                                           //or MTYPE

    //FIXME  right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi  (might need to save them before using them?)
    s_mov_b32       s_save_m0,          m0                                                                  //save M0

    /*      global mem offset           */
    s_mov_b32       s_save_mem_offset,  0x0                                                                     //mem offset initial value = 0
373
374
    /*      save HW registers   */
    // HWREG area layout (one dword each, written in this order): M0, PC_LO,
    // PC_HI, EXEC_LO, EXEC_HI, STATUS, TRAPSTS, XNACK_MASK_LO, XNACK_MASK_HI,
    // MODE, TBA_LO, TBA_HI. write_hwreg_to_mem / get_*_size_bytes are macros
    // defined elsewhere in this file — not visible in this chunk.
    //////////////////////////////

  L_SAVE_HWREG:
        // HWREG SR memory offset : size(VGPR)+size(SGPR)
       get_vgpr_size_bytes(s_save_mem_offset)
       get_sgpr_size_bytes(s_save_tmp)
       s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp


    s_mov_b32       s_save_buf_rsrc2, 0x4                               //NUM_RECORDS   in bytes
    if (SWIZZLE_EN)
        s_add_u32       s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0                     //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_save_buf_rsrc2,  0x1000000                                //NUM_RECORDS in bytes
    end


    write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)                  //M0

    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
        s_add_u32 s_save_pc_lo, s_save_pc_lo, 4             //pc[31:0]+4
        s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0          //carry bit over
        s_mov_b32   tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
        s_mov_b32   tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
    end

    write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)                   //PC
    write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
    write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)             //EXEC
    write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
    write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)              //STATUS

    //s_save_trapsts conflicts with s_save_alloc_size
    s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
    write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset)             //TRAPSTS

    write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset)           //XNACK_MASK_LO
    write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset)           //XNACK_MASK_HI

    //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
    s_getreg_b32    s_save_m0, hwreg(HW_REG_MODE)                                                   //MODE (reusing s_save_m0 — real M0 was already written above)
    write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
    write_hwreg_to_mem(tba_lo, s_save_buf_rsrc0, s_save_mem_offset)                     //TBA_LO
    write_hwreg_to_mem(tba_hi, s_save_buf_rsrc0, s_save_mem_offset)                     //TBA_HI



    /*      the first wave in the threadgroup    */
        // save first_wave bit in tba_hi unused bit.26
    s_and_b32       s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK     // extract first wave bit
    //s_or_b32        tba_hi, s_save_tmp, tba_hi                                        // save first wave bit to tba_hi.bits[26]
    s_mov_b32        s_save_exec_hi, 0x0
    s_or_b32         s_save_exec_hi, s_save_tmp, s_save_exec_hi                          // save first wave bit to s_save_exec_hi.bits[26]
429
430
    /*          save SGPRs      */
        // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save...
    //////////////////////////////

    // SGPR SR memory offset : size(VGPR)
    get_vgpr_size_bytes(s_save_mem_offset)
    // TODO, change RSRC word to rearrange memory layout for SGPRS

    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)               //sgpr_size
    s_add_u32       s_save_alloc_size, s_save_alloc_size, 1
    s_lshl_b32      s_save_alloc_size, s_save_alloc_size, 4                         //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)

    if (SGPR_SAVE_USE_SQC)
        s_lshl_b32      s_save_buf_rsrc2,   s_save_alloc_size, 2                    //NUM_RECORDS in bytes
    else
        s_lshl_b32      s_save_buf_rsrc2,   s_save_alloc_size, 8                    //NUM_RECORDS in bytes (64 threads)
    end

    if (SWIZZLE_EN)
        s_add_u32       s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0                     //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_save_buf_rsrc2,  0x1000000                                //NUM_RECORDS in bytes
    end


    // backup s_save_buf_rsrc0,1 to s_save_xnack_mask_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
    // (the XNACK mask values were already written to memory in the HWREG area, so those registers are free here)
    //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
    s_mov_b64 s_save_xnack_mask_lo, s_save_buf_rsrc0
    s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
    s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0

    s_mov_b32       m0, 0x0                         //SGPR initial index value =0
  L_SAVE_SGPR_LOOP:
    // SGPR is allocated in 16 SGPR granularity; copy s[m0:m0+15] into s0..s15
    // via relative moves, then burst-write all 16 at once.
    s_movrels_b64   s0, s0     //s0 = s[0+m0], s1 = s[1+m0]
    s_movrels_b64   s2, s2     //s2 = s[2+m0], s3 = s[3+m0]
    s_movrels_b64   s4, s4     //s4 = s[4+m0], s5 = s[5+m0]
    s_movrels_b64   s6, s6     //s6 = s[6+m0], s7 = s[7+m0]
    s_movrels_b64   s8, s8     //s8 = s[8+m0], s9 = s[9+m0]
    s_movrels_b64   s10, s10   //s10 = s[10+m0], s11 = s[11+m0]
    s_movrels_b64   s12, s12   //s12 = s[12+m0], s13 = s[13+m0]
    s_movrels_b64   s14, s14   //s14 = s[14+m0], s15 = s[15+m0]

    write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) //PV: the best performance should be using s_buffer_store_dwordx4
    s_add_u32       m0, m0, 16                                                      //next sgpr index
    s_cmp_lt_u32    m0, s_save_alloc_size                                           //scc = (m0 < s_save_alloc_size) ? 1 : 0
    s_cbranch_scc1  L_SAVE_SGPR_LOOP                                    //SGPR save is complete?
    // restore s_save_buf_rsrc0,1
    //s_mov_b64 s_save_buf_rsrc0, s_save_pc_lo
    s_mov_b64 s_save_buf_rsrc0, s_save_xnack_mask_lo
481
482
483
484
    /*          save first 4 VGPR, then LDS save could use v0-v3 as scratch   */
        // each wave will alloc 4 vgprs at least...
    /////////////////////////////////////////////////////////////////////////////////////

    s_mov_b32       s_save_mem_offset, 0
    s_mov_b32       exec_lo, 0xFFFFFFFF                                             //need every thread from now on
    s_mov_b32       exec_hi, 0xFFFFFFFF

    if (SWIZZLE_EN)
        s_add_u32       s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0                     //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_save_buf_rsrc2,  0x1000000                                //NUM_RECORDS in bytes
    end


    // VGPR Allocated in 4-GPR granularity

if G8SR_VGPR_SR_IN_DWX4
        // the const stride for DWx4 is 4*4 bytes
        s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
        s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes

        buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1

        s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
        s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
else
        // one dword per lane per store; ADD_TID_ENABLE in rsrc3 makes each
        // lane write its own slot, offset steps 256B (64 lanes * 4B) per VGPR
        buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
        buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
        buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
        buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
end
517
518
519
    /*          save LDS        */
    // Only the threadgroup's first wave (flag carried in s_save_exec_hi bit 26)
    // saves LDS, after an s_barrier synchronizes all waves in the TG.
    //////////////////////////////

  L_SAVE_LDS:

        // Change EXEC to all threads...
    s_mov_b32       exec_lo, 0xFFFFFFFF   //need every thread from now on
    s_mov_b32       exec_hi, 0xFFFFFFFF

    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)             //lds_size
    s_and_b32       s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF                //lds_size is zero? (s_and sets SCC=0 when result is 0)
    s_cbranch_scc0  L_SAVE_LDS_DONE                                                                            //no lds used? jump to L_SAVE_DONE

    s_barrier               //LDS is used? wait for other waves in the same TG
    //s_and_b32     s_save_tmp, tba_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK                //exec is still used here
    s_and_b32       s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK                //exec is still used here
    s_cbranch_scc0  L_SAVE_LDS_DONE                                                  //not the first wave -> nothing to do

        // first wave do LDS save;

    s_lshl_b32      s_save_alloc_size, s_save_alloc_size, 6                         //LDS size in dwords = lds_size * 64dw
    s_lshl_b32      s_save_alloc_size, s_save_alloc_size, 2                         //LDS size in bytes
    s_mov_b32       s_save_buf_rsrc2,  s_save_alloc_size                            //NUM_RECORDS in bytes

    // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
    //
    get_vgpr_size_bytes(s_save_mem_offset)
    get_sgpr_size_bytes(s_save_tmp)
    s_add_u32  s_save_mem_offset, s_save_mem_offset, s_save_tmp
    s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()


    if (SWIZZLE_EN)
        s_add_u32       s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0       //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_save_buf_rsrc2,  0x1000000                  //NUM_RECORDS in bytes
    end

    s_mov_b32       m0, 0x0                                               //lds_offset initial value = 0
559
560
var LDS_DMA_ENABLE = 0
var UNROLL = 0
if UNROLL==0 && LDS_DMA_ENABLE==1
        s_mov_b32  s3, 256*2
        s_nop 0
        s_nop 0
        s_nop 0
  L_SAVE_LDS_LOOP:
        //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
    if (SAVE_LDS)     //SPI always alloc LDS space in 128DW granularity
            buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1            // first 64DW
            buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
    end

    s_add_u32       m0, m0, s3                                          //every buffer_store_lds does 256 bytes
    s_add_u32       s_save_mem_offset, s_save_mem_offset, s3                            //mem offset increased by 256 bytes
    s_cmp_lt_u32    m0, s_save_alloc_size                                               //scc=(m0 < s_save_alloc_size) ? 1 : 0
    s_cbranch_scc1  L_SAVE_LDS_LOOP                                                     //LDS save is complete?

elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROLL variant; has icache miss
      // store from highest LDS address to lowest
      s_mov_b32  s3, 256*2
      s_sub_u32  m0, s_save_alloc_size, s3
      s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
      s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9   // how many 128-DW (512-byte) chunks...
      s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size   // store from highest addr to lowest
      s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4   // PC offset increment,  each LDS save block cost 6*4 Bytes instruction
      s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4   //plus the 3 instructions (s_add_u32/s_addc_u32/s_setpc_b64) between s_getpc and the unrolled blocks
      s_nop 0
      s_nop 0
      s_nop 0   //pad 3 dw to let LDS_DMA align with 64Bytes
      s_getpc_b64 s[0:1]                              // reuse s[0:1], since s[0:1] already saved
      s_add_u32   s0, s0,s_save_alloc_size            //jump into the unrolled block sequence, skipping already-covered blocks
      s_addc_u32  s1, s1, 0
      s_setpc_b64 s[0:1]


       for var i =0; i< 128; i++
            // be careful to make here a 64Byte aligned address, which could improve performance...
            buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0           // first 64DW
            buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256           // second 64DW

        if i!=127
        s_sub_u32  m0, m0, s3      // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e.  pack more LDS_DMA inst to one Cacheline
            s_sub_u32  s_save_mem_offset, s_save_mem_offset,  s3
            end
       end

else   // BUFFER_STORE path (the active one: LDS_DMA_ENABLE==0 above)
      v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
      v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2     // tid
      v_mul_i32_i24 v2, v3, 8   // tid*8 = per-thread byte offset (each thread moves one dwordx2)
      v_mov_b32 v3, 256*2
      s_mov_b32 m0, 0x10000
      s_mov_b32 s0, s_save_buf_rsrc3      //stash rsrc3 so it can be restored after the loop
      s_and_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0xFF7FFFFF    // disable add_tid
      s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0x58000   //DFMT

L_SAVE_LDS_LOOP_VECTOR:
      ds_read_b64 v[0:1], v2    //x =LDS[a], byte address
      s_waitcnt lgkmcnt(0)
      buffer_store_dwordx2  v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1  glc:1  slc:1
//      s_waitcnt vmcnt(0)
      v_add_u32 v2, vcc[0:1], v2, v3                 //advance LDS address by 512 bytes (whole wave's worth)
      v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
      s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR

      // restore rsrc3
      s_mov_b32 s_save_buf_rsrc3, s0

end

L_SAVE_LDS_DONE:
634
635
    /*          save VGPRs  - set the Rest VGPRs        */
    //////////////////////////////////////////////////////////////////////////////////////
  L_SAVE_VGPR:
    // VGPR SR memory offset: 0
    // TODO rearrange the RSRC words to use swizzle for VGPR save...

    s_mov_b32       s_save_mem_offset, (0+256*4)                                    // for the rest VGPRs (v0-v3 occupy the first 256*4 bytes)
    s_mov_b32       exec_lo, 0xFFFFFFFF                                             //need every thread from now on
    s_mov_b32       exec_hi, 0xFFFFFFFF

    s_getreg_b32    s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)                   //vgpr_size
    s_add_u32       s_save_alloc_size, s_save_alloc_size, 1
    s_lshl_b32      s_save_alloc_size, s_save_alloc_size, 2                         //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)   //FIXME for GFX, zero is possible
    s_lshl_b32      s_save_buf_rsrc2,  s_save_alloc_size, 8                         //NUM_RECORDS in bytes (64 threads*4)
    if (SWIZZLE_EN)
        s_add_u32       s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0                     //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_save_buf_rsrc2,  0x1000000                                //NUM_RECORDS in bytes
    end


    // VGPR Allocated in 4-GPR granularity

if G8SR_VGPR_SR_IN_DWX4
        // the const stride for DWx4 is 4*4 bytes
        s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
        s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes

        s_mov_b32         m0, 4     // skip first 4 VGPRs
        s_cmp_lt_u32      m0, s_save_alloc_size
        s_cbranch_scc0    L_SAVE_VGPR_LOOP_END      // no more vgprs

        s_set_gpr_idx_on  m0, 0x1   // This will change M0
        s_add_u32         s_save_alloc_size, s_save_alloc_size, 0x1000  // because above inst change m0
L_SAVE_VGPR_LOOP:
        v_mov_b32         v0, v0   // v0 = v[0+m0]
        v_mov_b32         v1, v1
        v_mov_b32         v2, v2
        v_mov_b32         v3, v3


        buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
        s_add_u32         m0, m0, 4
        s_add_u32         s_save_mem_offset, s_save_mem_offset, 256*4
        s_cmp_lt_u32      m0, s_save_alloc_size
    s_cbranch_scc1  L_SAVE_VGPR_LOOP                                                //VGPR save is complete?
    s_set_gpr_idx_off
L_SAVE_VGPR_LOOP_END:

        s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
        s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
else
    // VGPR store using dw burst
    s_mov_b32         m0, 0x4   //VGPR initial index value = 4 (v0-v3 were saved earlier)
    s_cmp_lt_u32      m0, s_save_alloc_size
    s_cbranch_scc0    L_SAVE_VGPR_END


    s_set_gpr_idx_on    m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
    s_add_u32       s_save_alloc_size, s_save_alloc_size, 0x1000                    //add 0x1000 since we compare m0 against it later

  L_SAVE_VGPR_LOOP:
    v_mov_b32       v0, v0              //v0 = v[0+m0]
    v_mov_b32       v1, v1              //v1 = v[1+m0]
    v_mov_b32       v2, v2              //v2 = v[2+m0]
    v_mov_b32       v3, v3              //v3 = v[3+m0]

    if(USE_MTBUF_INSTEAD_OF_MUBUF)
        tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
    else
        buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
        buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
        buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
        buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
    end

    s_add_u32       m0, m0, 4                                                       //next vgpr index
    s_add_u32       s_save_mem_offset, s_save_mem_offset, 256*4                     //every buffer_store_dword does 256 bytes
    s_cmp_lt_u32    m0, s_save_alloc_size                                           //scc = (m0 < s_save_alloc_size) ? 1 : 0
    s_cbranch_scc1  L_SAVE_VGPR_LOOP                                                //VGPR save is complete?
    s_set_gpr_idx_off
end

L_SAVE_VGPR_END:
720
721
722
723
724
725
    /*     S_PGM_END_SAVED  */                              //FIXME  graphics ONLY
    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
        s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff    //pc[47:32]
        s_add_u32 s_save_pc_lo, s_save_pc_lo, 4             //pc[31:0]+4
        s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0          //carry bit over
        s_rfe_b64 s_save_pc_lo                              //Return to the main shader program
    else
    end

// Save Done timestamp (debug builds only)
if G8SR_DEBUG_TIMESTAMP
        s_memrealtime   s_g8sr_ts_save_d
        // SGPR SR memory offset : size(VGPR)
        get_vgpr_size_bytes(s_save_mem_offset)
        s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
        s_waitcnt lgkmcnt(0)         //FIXME, will cause xnack??
        // Need reset rsrc2??
        s_mov_b32 m0, s_save_mem_offset
        s_mov_b32 s_save_buf_rsrc2,  0x1000000                                  //NUM_RECORDS in bytes
        s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0       glc:1
end


    s_branch    L_END_PGM
750
751
752
/**************************************************************************/
/*                      restore routine                                   */
/**************************************************************************/

L_RESTORE:
    /*      Setup Resource Constants    */
    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
        //calculate wd_addr using absolute thread id
        v_readlane_b32 s_restore_tmp, v9, 0
        s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
        s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
        s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
        s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
        s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
    else
    end

if G8SR_DEBUG_TIMESTAMP
        s_memrealtime   s_g8sr_ts_restore_s
        s_waitcnt lgkmcnt(0)         //FIXME, will cause xnack??
        // tma_lo/hi are sgpr 110, 111, which will not be used for 112 SGPR allocated case...
        s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
        s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1]   //backup ts to ttmp0/1, since exec will be finally restored..
end



    //build the buffer resource descriptor used by all restore reads
    s_mov_b32       s_restore_buf_rsrc0,    s_restore_spi_init_lo                                                           //base_addr_lo
    s_and_b32       s_restore_buf_rsrc1,    s_restore_spi_init_hi, 0x0000FFFF                                               //base_addr_hi
    s_or_b32        s_restore_buf_rsrc1,    s_restore_buf_rsrc1,  S_RESTORE_BUF_RSRC_WORD1_STRIDE
    s_mov_b32       s_restore_buf_rsrc2,    0                                                                               //NUM_RECORDS initial value = 0 (in bytes)
    s_mov_b32       s_restore_buf_rsrc3,    S_RESTORE_BUF_RSRC_WORD3_MISC
    s_and_b32       s_restore_tmp,          s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
    s_lshr_b32      s_restore_tmp,          s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)       //get ATC bit into position
    s_or_b32        s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp                                             //or ATC
    s_and_b32       s_restore_tmp,          s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
    s_lshr_b32      s_restore_tmp,          s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)   //get MTYPE bits into position
    s_or_b32        s_restore_buf_rsrc3,    s_restore_buf_rsrc3,  s_restore_tmp                                             //or MTYPE

    /*      global mem offset           */
//  s_mov_b32       s_restore_mem_offset, 0x0                               //mem offset initial value = 0

    /*      the first wave in the threadgroup    */
    s_and_b32       s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
    s_cbranch_scc0  L_RESTORE_VGPR                                                      //scc0 (first-wave bit clear): not the first wave, skip LDS restore
798
    /*          restore LDS     */
    //////////////////////////////
  L_RESTORE_LDS:

    s_mov_b32       exec_lo, 0xFFFFFFFF                                                     //need every thread from now on   //be consistent with SAVE although can be moved ahead
    s_mov_b32       exec_hi, 0xFFFFFFFF

    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)              //lds_size
    s_and_b32       s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF                  //lds_size is zero? (sets scc)
    s_cbranch_scc0  L_RESTORE_VGPR                                                          //no lds used? jump to L_RESTORE_VGPR
    s_lshl_b32      s_restore_alloc_size, s_restore_alloc_size, 6                           //LDS size in dwords = lds_size * 64dw
    s_lshl_b32      s_restore_alloc_size, s_restore_alloc_size, 2                           //LDS size in bytes
    s_mov_b32       s_restore_buf_rsrc2,    s_restore_alloc_size                            //NUM_RECORDS in bytes

    // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
    //
    get_vgpr_size_bytes(s_restore_mem_offset)
    get_sgpr_size_bytes(s_restore_tmp)
    s_add_u32  s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
    s_add_u32  s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()            //FIXME, Check if offset overflow???


    if (SWIZZLE_EN)
        s_add_u32       s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0                       //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_restore_buf_rsrc2,  0x1000000                                     //NUM_RECORDS in bytes
    end
    s_mov_b32       m0, 0x0                                                                 //lds_offset initial value = 0

  L_RESTORE_LDS_LOOP:
    if (SAVE_LDS)
        buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1                    // first 64DW
        buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256         // second 64DW
    end
    s_add_u32       m0, m0, 256*2                                               // 128 DW
    s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 256*2           //mem offset increased by 128DW
    s_cmp_lt_u32    m0, s_restore_alloc_size                                    //scc=(m0 < s_restore_alloc_size) ? 1 : 0
    s_cbranch_scc1  L_RESTORE_LDS_LOOP                                                      //LDS restore is complete?
838
    /*          restore VGPRs       */
    //////////////////////////////
  L_RESTORE_VGPR:
        // VGPR SR memory offset : 0
    s_mov_b32       s_restore_mem_offset, 0x0
    s_mov_b32       exec_lo, 0xFFFFFFFF                                                     //need every thread from now on   //be consistent with SAVE although can be moved ahead
    s_mov_b32       exec_hi, 0xFFFFFFFF

    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)    //vgpr_size
    s_add_u32       s_restore_alloc_size, s_restore_alloc_size, 1
    s_lshl_b32      s_restore_alloc_size, s_restore_alloc_size, 2                           //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
    s_lshl_b32      s_restore_buf_rsrc2,  s_restore_alloc_size, 8                           //NUM_RECORDS in bytes (64 threads*4)
    if (SWIZZLE_EN)
        s_add_u32       s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0                       //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_restore_buf_rsrc2,  0x1000000                                     //NUM_RECORDS in bytes
    end

if G8SR_VGPR_SR_IN_DWX4
     // DWx4 path: restore from the highest-saved group down to v0
     get_vgpr_size_bytes(s_restore_mem_offset)
     s_sub_u32         s_restore_mem_offset, s_restore_mem_offset, 256*4

     // the const stride for DWx4 is 4*4 bytes
     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes

     s_mov_b32         m0, s_restore_alloc_size
     s_set_gpr_idx_on  m0, 0x8    // Note.. This will change m0

L_RESTORE_VGPR_LOOP:
     buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
     s_waitcnt vmcnt(0)
     s_sub_u32         m0, m0, 4
     v_mov_b32         v0, v0   // v[0+m0] = v0
     v_mov_b32         v1, v1
     v_mov_b32         v2, v2
     v_mov_b32         v3, v3
     s_sub_u32         s_restore_mem_offset, s_restore_mem_offset, 256*4
     s_cmp_eq_u32      m0, 0x8000                  //vgpr index counted down to 0? (0x8000 = the gpr-idx mode bits set by s_set_gpr_idx_on)
     s_cbranch_scc0    L_RESTORE_VGPR_LOOP
     s_set_gpr_idx_off

     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes

else
    // VGPR load using dw burst
    s_mov_b32       s_restore_mem_offset_save, s_restore_mem_offset     // restore starts with v4; v0-v3 are loaded last (v0 is the loop's data landing pad)
    s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 256*4
    s_mov_b32       m0, 4                               //VGPR initial index value = 4
    s_set_gpr_idx_on  m0, 0x8                       //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
    s_add_u32       s_restore_alloc_size, s_restore_alloc_size, 0x8000                      //add 0x8000 since we compare m0 against it later

  L_RESTORE_VGPR_LOOP:
    if(USE_MTBUF_INSTEAD_OF_MUBUF)
        tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
    else
        buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
        buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
        buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
        buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
    end
    s_waitcnt       vmcnt(0)                                                                //ensure data ready
    v_mov_b32       v0, v0                                                                  //v[0+m0] = v0
    v_mov_b32       v1, v1
    v_mov_b32       v2, v2
    v_mov_b32       v3, v3
    s_add_u32       m0, m0, 4                                                               //next vgpr index
    s_add_u32       s_restore_mem_offset, s_restore_mem_offset, 256*4                           //every buffer_load_dword does 256 bytes
    s_cmp_lt_u32    m0, s_restore_alloc_size                                                //scc = (m0 < s_restore_alloc_size) ? 1 : 0
    s_cbranch_scc1  L_RESTORE_VGPR_LOOP                                                     //VGPR restore (except v0-v3) is complete?
    s_set_gpr_idx_off
                                                                                            /* VGPR restore on v0 */
    if(USE_MTBUF_INSTEAD_OF_MUBUF)
        tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
    else
        buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1
        buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256
        buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*2
        buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*3
    end

end
922
    /*          restore SGPRs       */
    //////////////////////////////

    // SGPR SR memory offset : size(VGPR)
    get_vgpr_size_bytes(s_restore_mem_offset)
    get_sgpr_size_bytes(s_restore_tmp)
    s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
    s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 16*4     // restore SGPR from S[n] to S[0], by 16 sgprs group
    // TODO, change RSRC word to rearrange memory layout for SGPRS

    s_getreg_b32    s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)                //sgpr_size
    s_add_u32       s_restore_alloc_size, s_restore_alloc_size, 1
    s_lshl_b32      s_restore_alloc_size, s_restore_alloc_size, 4                           //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)

    if (SGPR_SAVE_USE_SQC)
        s_lshl_b32      s_restore_buf_rsrc2,    s_restore_alloc_size, 2                     //NUM_RECORDS in bytes
    else
        s_lshl_b32      s_restore_buf_rsrc2,    s_restore_alloc_size, 8                     //NUM_RECORDS in bytes (64 threads)
    end
    if (SWIZZLE_EN)
        s_add_u32       s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0                       //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_restore_buf_rsrc2,  0x1000000                                     //NUM_RECORDS in bytes
    end

    /* If 112 SGPRs are allocated, 4 sgprs are not used TBA(108,109),TMA(110,111),
       However, we are safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG
    */
    s_mov_b32 m0, s_restore_alloc_size                                  //m0 = number of SGPRs remaining to restore

 L_RESTORE_SGPR_LOOP:
    read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)  //PV: further performance improvement can be made
    s_waitcnt       lgkmcnt(0)                                                              //ensure data ready

    s_sub_u32 m0, m0, 16    // Restore from S[n] to S[0]

    s_movreld_b64   s0, s0      //s[0+m0] = s0
    s_movreld_b64   s2, s2
    s_movreld_b64   s4, s4
    s_movreld_b64   s6, s6
    s_movreld_b64   s8, s8
    s_movreld_b64   s10, s10
    s_movreld_b64   s12, s12
    s_movreld_b64   s14, s14

    s_cmp_eq_u32    m0, 0               //scc = (m0 == 0) ? 1 : 0      //all 16-sgpr groups written back?
    s_cbranch_scc0  L_RESTORE_SGPR_LOOP             //SGPR restore is complete?
970
    /*      restore HW registers    */
    //////////////////////////////
  L_RESTORE_HWREG:


if G8SR_DEBUG_TIMESTAMP
      s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo      //recover the ts that was backed up into ttmp0/1 at restore entry
      s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
end

    // HWREG SR memory offset : size(VGPR)+size(SGPR)
    get_vgpr_size_bytes(s_restore_mem_offset)
    get_sgpr_size_bytes(s_restore_tmp)
    s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp


    s_mov_b32       s_restore_buf_rsrc2, 0x4                                                //NUM_RECORDS   in bytes
    if (SWIZZLE_EN)
        s_add_u32       s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0                       //FIXME need to use swizzle to enable bounds checking?
    else
        s_mov_b32       s_restore_buf_rsrc2,  0x1000000                                     //NUM_RECORDS in bytes
    end

    //read back the HW registers in the same order they were saved
    read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)                    //M0
    read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)             //PC
    read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
    read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)               //EXEC
    read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
    read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset)                //STATUS
    read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset)               //TRAPSTS
    read_hwreg_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset)                   //XNACK_MASK_LO
    read_hwreg_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset)                   //XNACK_MASK_HI
    read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)              //MODE
    read_hwreg_from_mem(tba_lo, s_restore_buf_rsrc0, s_restore_mem_offset)                      //TBA_LO
    read_hwreg_from_mem(tba_hi, s_restore_buf_rsrc0, s_restore_mem_offset)                      //TBA_HI

    s_waitcnt       lgkmcnt(0)                                                                                      //from now on, it is safe to restore STATUS and IB_STS

    //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
        s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8            //pc[31:0]+8     //two back-to-back s_trap are used (first for save and second for restore)
        s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0        //carry bit over
    end
    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
        s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4            //pc[31:0]+4     // save is hack through s_trap but restore is normal
        s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0        //carry bit over
    end

    s_mov_b32       m0,         s_restore_m0
    s_mov_b32       exec_lo,    s_restore_exec_lo
    s_mov_b32       exec_hi,    s_restore_exec_hi

    //restore the two SAVECTX-related TRAPSTS fields individually (s_restore_m0 reused as scratch)
    s_and_b32       s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
    s_and_b32       s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
    s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
    s_setreg_b32    hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
    //s_setreg_b32  hwreg(HW_REG_TRAPSTS),  s_restore_trapsts      //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
    s_setreg_b32    hwreg(HW_REG_MODE),     s_restore_mode
    //reuse s_restore_m0 as a temp register
    //rebuild IB_STS from the RCNT / FIRST_REPLAY bits that were stashed in the saved PC_HI
    s_and_b32       s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
    s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
    s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
    s_mov_b32       s_restore_tmp, 0x0                                                                              //IB_STS is zero
    s_or_b32        s_restore_tmp, s_restore_tmp, s_restore_m0
    s_and_b32       s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
    s_lshr_b32      s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
    s_lshl_b32      s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
    s_or_b32        s_restore_tmp, s_restore_tmp, s_restore_m0
    s_and_b32       s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
    s_lshr_b32      s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT       //s_restore_m0[0] = INST_ATC; consumed by s_rfe_restore_b64 below
    s_setreg_b32    hwreg(HW_REG_IB_STS),   s_restore_tmp

    s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff      //pc[47:32]        //Do it here in order not to affect STATUS
    s_and_b64    exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
    s_and_b64    vcc, vcc, vcc  // Restore STATUS.VCCZ, not writable by s_setreg_b32
    set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu

    s_barrier                                                   //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time

if G8SR_DEBUG_TIMESTAMP
    s_memrealtime s_g8sr_ts_restore_d
    s_waitcnt lgkmcnt(0)
end

//  s_rfe_b64 s_restore_pc_lo                                   //Return to the main shader program and resume execution
    s_rfe_restore_b64  s_restore_pc_lo, s_restore_m0            // s_restore_m0[0] is used to set STATUS.inst_atc
1058
1059
/**************************************************************************/
/*                      the END                                           */
/**************************************************************************/
L_END_PGM:
    s_endpgm

end
1067
1068
1069/**************************************************************************/
1070/*                      the helper functions                              */
1071/**************************************************************************/
1072
//Only for save hwreg to mem
//Stores one dword (hwreg value in s) and advances s_mem_offset by 4.
//m0 is needed as the store offset, so the caller's m0 is parked in exec_lo
//for the duration of the store and restored afterwards.
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
        s_mov_b32 exec_lo, m0                   //assuming exec_lo is not needed anymore from this point on
        s_mov_b32 m0, s_mem_offset              //s_buffer_store takes its offset from m0 here
        s_buffer_store_dword s, s_rsrc, m0      glc:1
        s_add_u32       s_mem_offset, s_mem_offset, 4    //advance to the next hwreg slot
        s_mov_b32   m0, exec_lo                 //put the caller's m0 back
end
1081
1082
// HWREG are saved before SGPRs, so all HWREG could be use.
//Stores a group of 16 SGPRs (starting at s) as four dwordx4 bursts at
//immediate offsets 0/16/32/48, then advances the rsrc BASE ADDRESS itself
//by 64 bytes for the next group. Note: the s_mem_offset parameter is not
//used by this body.
function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)

        s_buffer_store_dwordx4 s[0], s_rsrc, 0  glc:1
        s_buffer_store_dwordx4 s[4], s_rsrc, 16  glc:1
        s_buffer_store_dwordx4 s[8], s_rsrc, 32  glc:1
        s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
        s_add_u32       s_rsrc[0], s_rsrc[0], 4*16      //advance base_addr_lo by 64 bytes
        s_addc_u32      s_rsrc[1], s_rsrc[1], 0x0             // +scc (carry into base_addr_hi)
end
1093
1094
//Loads one hwreg dword into s from s_rsrc at s_mem_offset, then advances
//the offset by 4 so sequential calls walk the saved hwreg area in order.
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
    s_buffer_load_dword s, s_rsrc, s_mem_offset     glc:1
    s_add_u32       s_mem_offset, s_mem_offset, 4   //next hwreg slot
end
1099
//Loads 16 SGPRs into s..s+15, then steps the offset DOWN by 64 bytes:
//the SGPR restore loop walks the groups from S[n] back to S[0].
function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
    s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset      glc:1
    s_sub_u32       s_mem_offset, s_mem_offset, 4*16    //previous 16-sgpr group
end
1104
1105
1106
//Returns the wave's LDS allocation size in bytes in s_lds_size_byte.
function get_lds_size_bytes(s_lds_size_byte)
    // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
    s_getreg_b32   s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)          // lds_size
    s_lshl_b32     s_lds_size_byte, s_lds_size_byte, 8                      //LDS size in bytes = lds_size * 64 (DW) * 4 (bytes/DW)    // granularity 64DW
end
1112
//Returns the wave's saved-VGPR region size in bytes in s_vgpr_size_byte.
function get_vgpr_size_bytes(s_vgpr_size_byte)
    s_getreg_b32   s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)  //vgpr_size
    s_add_u32      s_vgpr_size_byte, s_vgpr_size_byte, 1
    s_lshl_b32     s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPR bytes = (vgpr_size + 1) * 4 (granularity) * 64 (threads) * 4 (bytes/DW)   //FIXME for GFX, zero is possible
end
1118
//Returns the wave's saved-SGPR region size in bytes in s_sgpr_size_byte.
function get_sgpr_size_bytes(s_sgpr_size_byte)
    s_getreg_b32   s_sgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE)  //sgpr_size
    s_add_u32      s_sgpr_size_byte, s_sgpr_size_byte, 1
    s_lshl_b32     s_sgpr_size_byte, s_sgpr_size_byte, 6 //Number of SGPR bytes = (sgpr_size + 1) * 16 (granularity) * 4 (bytes)   (non-zero value)
end
1124
//Size of the saved-HWREG region in the save/restore memory layout.
function get_hwreg_size_bytes
    return 128 //HWREG size 128 bytes
end
1128
//Writes the saved STATUS value back, in two s_setreg writes that skip the
//SPI_PRIO field (fields above it first, then the fields below it).
function set_status_without_spi_prio(status, tmp)
    // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
    s_lshr_b32      tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT              //tmp = STATUS fields above SPI_PRIO
    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
    s_nop           0x2 // avoid S_SETREG => S_SETREG hazard
    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
end
1136