/*
 *  Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HEXAGON_GEN_TCG_H
#define HEXAGON_GEN_TCG_H

/*
 * Here is a primer to understand the tag names for load/store instructions
 *
 * Data types
 *      b        signed byte                       r0 = memb(r2+#0)
 *     ub        unsigned byte                     r0 = memub(r2+#0)
 *      h        signed half word (16 bits)        r0 = memh(r2+#0)
 *     uh        unsigned half word                r0 = memuh(r2+#0)
 *      i        integer (32 bits)                 r0 = memw(r2+#0)
 *      d        double word (64 bits)             r1:0 = memd(r2+#0)
 *
 * Addressing modes
 *     _io       indirect with offset              r0 = memw(r1+#4)
 *     _ur       absolute with register offset     r0 = memw(r1<<#4+##variable)
 *     _rr       indirect with register offset     r0 = memw(r1+r4<<#2)
 *     gp        global pointer relative           r0 = memw(gp+#200)
 *     _sp       stack pointer relative            r0 = memw(r29+#12)
 *     _ap       absolute set                      r0 = memw(r1=##variable)
 *     _pr       post increment register           r0 = memw(r1++m1)
 *     _pbr      post increment bit reverse        r0 = memw(r1++m1:brev)
 *     _pi       post increment immediate          r0 = memb(r1++#1)
 *     _pci      post increment circular immediate r0 = memw(r1++#4:circ(m0))
 *     _pcr      post increment circular register  r0 = memw(r1++I:circ(m0))
 */
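
/*
 * Putting the pieces together, a worked decode (for illustration only):
 *     L2_loadrub_pi    r0 = memub(r1++#1)
 *         ub      unsigned byte
 *         _pi     post increment immediate
 * The corresponding generator macro below is fGEN_TCG_L2_loadrub_pi.
 */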

/* Macros for complex addressing modes */
#define GET_EA_ap \
    do { \
        fEA_IMM(UiV); \
        tcg_gen_movi_tl(ReV, UiV); \
    } while (0)
#define GET_EA_pr \
    do { \
        fEA_REG(RxV); \
        fPM_M(RxV, MuV); \
    } while (0)
#define GET_EA_pbr \
    do { \
        gen_helper_fbrev(EA, RxV); \
        tcg_gen_add_tl(RxV, RxV, MuV); \
    } while (0)
#define GET_EA_pi \
    do { \
        fEA_REG(RxV); \
        fPM_I(RxV, siV); \
    } while (0)
#define GET_EA_pci \
    do { \
        TCGv tcgv_siV = tcg_constant_tl(siV); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \
                            hex_gpr[HEX_REG_CS0 + MuN]); \
    } while (0)
#define GET_EA_pcr(SHIFT) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, (SHIFT)); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
    } while (0)
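
/*
 * The GET_EA_* macros above are used as the GET_EA argument of the
 * generator macros later in this file (e.g. fGEN_TCG_loadbXw2(GET_EA_pci,
 * false)).  For GET_EA_pcr, the SHIFT argument matches log2 of the access
 * size in bytes (0 = byte, 1 = half word, 2 = word, 3 = double word), as
 * the instantiations below show; it is presumably used by gen_read_ireg
 * to scale the increment taken from the modifier register's I field.
 */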

/* Instructions with multiple definitions */
#define fGEN_TCG_LOAD_AP(RES, SIZE, SIGN) \
    do { \
        fMUST_IMMEXT(UiV); \
        fEA_IMM(UiV); \
        fLOAD(1, SIZE, SIGN, EA, RES); \
        tcg_gen_movi_tl(ReV, UiV); \
    } while (0)

#define fGEN_TCG_L4_loadrub_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 1, u)
#define fGEN_TCG_L4_loadrb_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 1, s)
#define fGEN_TCG_L4_loadruh_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 2, u)
#define fGEN_TCG_L4_loadrh_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 2, s)
#define fGEN_TCG_L4_loadri_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 4, u)
#define fGEN_TCG_L4_loadrd_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RddV, 8, u)

#define fGEN_TCG_L2_loadrub_pci(SHORTCODE)    SHORTCODE
#define fGEN_TCG_L2_loadrb_pci(SHORTCODE)     SHORTCODE
#define fGEN_TCG_L2_loadruh_pci(SHORTCODE)    SHORTCODE
#define fGEN_TCG_L2_loadrh_pci(SHORTCODE)     SHORTCODE
#define fGEN_TCG_L2_loadri_pci(SHORTCODE)     SHORTCODE
#define fGEN_TCG_L2_loadrd_pci(SHORTCODE)     SHORTCODE

#define fGEN_TCG_LOAD_pcr(SHIFT, LOAD) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, SHIFT); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
        LOAD; \
    } while (0)

#define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, u, EA, RdV))
#define fGEN_TCG_L2_loadrb_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, s, EA, RdV))
#define fGEN_TCG_L2_loadruh_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, u, EA, RdV))
#define fGEN_TCG_L2_loadrh_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, s, EA, RdV))
#define fGEN_TCG_L2_loadri_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV))
#define fGEN_TCG_L2_loadrd_pcr(SHORTCODE) \
      fGEN_TCG_LOAD_pcr(3, fLOAD(1, 8, u, EA, RddV))

#define fGEN_TCG_L2_loadrub_pr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrub_pbr(SHORTCODE)     SHORTCODE
#define fGEN_TCG_L2_loadrub_pi(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrb_pr(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadrb_pbr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrb_pi(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadruh_pr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadruh_pbr(SHORTCODE)     SHORTCODE
#define fGEN_TCG_L2_loadruh_pi(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrh_pr(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadrh_pbr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrh_pi(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadri_pr(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadri_pbr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadri_pi(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadrd_pr(SHORTCODE)       SHORTCODE
#define fGEN_TCG_L2_loadrd_pbr(SHORTCODE)      SHORTCODE
#define fGEN_TCG_L2_loadrd_pi(SHORTCODE)       SHORTCODE

/*
 * These instructions load 2 bytes and place them in the
 * two halves of the destination register.
 * The GET_EA macro determines the addressing mode.
 * The SIGN argument determines whether to zero-extend or
 * sign-extend.
 */
#define fGEN_TCG_loadbXw2(GET_EA, SIGN) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv byte = tcg_temp_new(); \
        GET_EA; \
        fLOAD(1, 2, u, EA, tmp); \
        tcg_gen_movi_tl(RdV, 0); \
        for (int i = 0; i < 2; i++) { \
            gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \
        } \
    } while (0)
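
/*
 * For example (values chosen purely for illustration): if the half word
 * loaded from memory is 0x8081, the zero-extending forms produce
 * RdV = 0x00800081 and the sign-extending forms produce RdV = 0xff80ff81.
 */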

#define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), false)
#define fGEN_TCG_L4_loadbzw2_ur(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), false)
#define fGEN_TCG_L2_loadbsw2_io(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), true)
#define fGEN_TCG_L4_loadbsw2_ur(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), true)
#define fGEN_TCG_L4_loadbzw2_ap(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_ap, false)
#define fGEN_TCG_L2_loadbzw2_pr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pr, false)
#define fGEN_TCG_L2_loadbzw2_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pbr, false)
#define fGEN_TCG_L2_loadbzw2_pi(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pi, false)
#define fGEN_TCG_L4_loadbsw2_ap(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_ap, true)
#define fGEN_TCG_L2_loadbsw2_pr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pr, true)
#define fGEN_TCG_L2_loadbsw2_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pbr, true)
#define fGEN_TCG_L2_loadbsw2_pi(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pi, true)
#define fGEN_TCG_L2_loadbzw2_pci(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pci, false)
#define fGEN_TCG_L2_loadbsw2_pci(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pci, true)
#define fGEN_TCG_L2_loadbzw2_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pcr(1), false)
#define fGEN_TCG_L2_loadbsw2_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pcr(1), true)

/*
 * These instructions load 4 bytes and place them in the
 * four halves of the destination register pair.
 * The GET_EA macro determines the addressing mode.
 * The SIGN argument determines whether to zero-extend or
 * sign-extend.
 */
#define fGEN_TCG_loadbXw4(GET_EA, SIGN) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv byte = tcg_temp_new(); \
        GET_EA; \
        fLOAD(1, 4, u, EA, tmp);  \
        tcg_gen_movi_i64(RddV, 0); \
        for (int i = 0; i < 4; i++) { \
            gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN)));  \
        }  \
    } while (0)
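
/*
 * As above, a purely illustrative example: a loaded word of 0x80818283
 * becomes RddV = 0x0080008100820083 when zero-extending and
 * RddV = 0xff80ff81ff82ff83 when sign-extending.
 */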

#define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), false)
#define fGEN_TCG_L4_loadbzw4_ur(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), false)
#define fGEN_TCG_L2_loadbsw4_io(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), true)
#define fGEN_TCG_L4_loadbsw4_ur(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), true)
#define fGEN_TCG_L2_loadbzw4_pci(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pci, false)
#define fGEN_TCG_L2_loadbsw4_pci(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pci, true)
#define fGEN_TCG_L2_loadbzw4_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pcr(2), false)
#define fGEN_TCG_L2_loadbsw4_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pcr(2), true)
#define fGEN_TCG_L4_loadbzw4_ap(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_ap, false)
#define fGEN_TCG_L2_loadbzw4_pr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pr, false)
#define fGEN_TCG_L2_loadbzw4_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pbr, false)
#define fGEN_TCG_L2_loadbzw4_pi(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pi, false)
#define fGEN_TCG_L4_loadbsw4_ap(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_ap, true)
#define fGEN_TCG_L2_loadbsw4_pr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pr, true)
#define fGEN_TCG_L2_loadbsw4_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pbr, true)
#define fGEN_TCG_L2_loadbsw4_pi(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pi, true)

/*
 * These instructions load a half word, shift the destination right by 16 bits
 * and place the loaded value in the high half word of the destination pair.
 * The GET_EA macro determines the addressing mode.
 */
#define fGEN_TCG_loadalignh(GET_EA) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
        GET_EA;  \
        fLOAD(1, 2, u, EA, tmp);  \
        tcg_gen_extu_i32_i64(tmp_i64, tmp); \
        tcg_gen_shri_i64(RyyV, RyyV, 16); \
        tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \
    } while (0)
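
/*
 * Illustrative example: with RyyV = 0xaaaabbbbccccdddd and a loaded half
 * word of 0x1111, the result is RyyV = 0x1111aaaabbbbcccc.
 */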

#define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \
    fGEN_TCG_loadalignh(fEA_IRs(UiV, RtV, uiV))
#define fGEN_TCG_L2_loadalignh_io(SHORTCODE) \
    fGEN_TCG_loadalignh(fEA_RI(RsV, siV))
#define fGEN_TCG_L2_loadalignh_pci(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pci)
#define fGEN_TCG_L2_loadalignh_pcr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pcr(1))
#define fGEN_TCG_L4_loadalignh_ap(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_ap)
#define fGEN_TCG_L2_loadalignh_pr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pr)
#define fGEN_TCG_L2_loadalignh_pbr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pbr)
#define fGEN_TCG_L2_loadalignh_pi(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pi)

/* Same as above, but loads a byte instead of a half word */
#define fGEN_TCG_loadalignb(GET_EA) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
        GET_EA;  \
        fLOAD(1, 1, u, EA, tmp);  \
        tcg_gen_extu_i32_i64(tmp_i64, tmp); \
        tcg_gen_shri_i64(RyyV, RyyV, 8); \
        tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \
    } while (0)

#define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \
    fGEN_TCG_loadalignb(fEA_RI(RsV, siV))
#define fGEN_TCG_L4_loadalignb_ur(SHORTCODE) \
    fGEN_TCG_loadalignb(fEA_IRs(UiV, RtV, uiV))
#define fGEN_TCG_L2_loadalignb_pci(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pci)
#define fGEN_TCG_L2_loadalignb_pcr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pcr(0))
#define fGEN_TCG_L4_loadalignb_ap(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_ap)
#define fGEN_TCG_L2_loadalignb_pr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pr)
#define fGEN_TCG_L2_loadalignb_pbr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pbr)
#define fGEN_TCG_L2_loadalignb_pi(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pi)

/*
 * Predicated loads
 * Here is a primer to understand the tag names
 *
 * Predicate used
 *      t        true "old" value                  if (p0) r0 = memb(r2+#0)
 *      f        false "old" value                 if (!p0) r0 = memb(r2+#0)
 *      tnew     true "new" value                  if (p0.new) r0 = memb(r2+#0)
 *      fnew     false "new" value                 if (!p0.new) r0 = memb(r2+#0)
 */
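/*
 * In the generator below, PRED leaves the LSB of the chosen predicate in
 * LSB, and the fLOAD is branched over when that bit is clear, so the
 * destination is only written when the predicate fires.
 * CHECK_NOSHUF_PRED appears to handle the :mem_noshuf packet semantics
 * for predicated loads; see its definition for the details.
 */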
#define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *label = gen_new_label(); \
        tcg_gen_movi_tl(EA, 0); \
        PRED;  \
        CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
        fLOAD(1, SIZE, SIGN, EA, RdV); \
        gen_set_label(label); \
    } while (0)

#define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, u)
#define fGEN_TCG_L2_ploadrubf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, u)
#define fGEN_TCG_L2_ploadrubtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, u)
#define fGEN_TCG_L2_ploadrubfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 1, u)
#define fGEN_TCG_L2_ploadrbt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, s)
#define fGEN_TCG_L2_ploadrbf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, s)
#define fGEN_TCG_L2_ploadrbtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, s)
#define fGEN_TCG_L2_ploadrbfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD({ fEA_REG(RxV); fPM_I(RxV, siV); }, \
                       fLSBNEWNOT(PtN), 1, s)

#define fGEN_TCG_L2_ploadruht_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, u)
#define fGEN_TCG_L2_ploadruhf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, u)
#define fGEN_TCG_L2_ploadruhtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, u)
#define fGEN_TCG_L2_ploadruhfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, u)
#define fGEN_TCG_L2_ploadrht_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, s)
#define fGEN_TCG_L2_ploadrhf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, s)
#define fGEN_TCG_L2_ploadrhtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, s)
#define fGEN_TCG_L2_ploadrhfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, s)

#define fGEN_TCG_L2_ploadrit_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 4, u)
#define fGEN_TCG_L2_ploadrif_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 4, u)
#define fGEN_TCG_L2_ploadritnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 4, u)
#define fGEN_TCG_L2_ploadrifnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 4, u)

/* Predicated loads into a register pair */
#define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *label = gen_new_label(); \
        tcg_gen_movi_tl(EA, 0); \
        PRED;  \
        CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
        fLOAD(1, 8, u, EA, RddV); \
        gen_set_label(label); \
    } while (0)

#define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLD(PtV))
#define fGEN_TCG_L2_ploadrdf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLDNOT(PtV))
#define fGEN_TCG_L2_ploadrdtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEW(PtN))
#define fGEN_TCG_L2_ploadrdfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEWNOT(PtN))

/* load-locked and store-locked */
#define fGEN_TCG_L2_loadw_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_L4_loadd_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_S2_storew_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_S4_stored_locked(SHORTCODE) \
    SHORTCODE

#define fGEN_TCG_STORE(SHORTCODE) \
    do { \
        TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
        TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
        SHORTCODE; \
    } while (0)
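
/*
 * HALF and BYTE are scratch temporaries referenced by the store
 * SHORTCODEs (via the fGETHALF/fGETBYTE style macros, as the _pcr
 * variants below show); they are marked G_GNUC_UNUSED because not every
 * shortcode uses both of them.
 */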

#define fGEN_TCG_STORE_pcr(SHIFT, STORE) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
        TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, SHIFT); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
        STORE; \
    } while (0)

#define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerb_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerb_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV)))

#define fGEN_TCG_S2_storerh_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerh_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerh_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, RtV)))

#define fGEN_TCG_S2_storerf_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerf_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerf_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(1, RtV)))

#define fGEN_TCG_S2_storeri_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storeri_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storeri_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, RtV))

#define fGEN_TCG_S2_storerd_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerd_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerd_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(3, fSTORE(1, 8, EA, RttV))

#define fGEN_TCG_S2_storerbnew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerbnew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerbnew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, NtN)))

#define fGEN_TCG_S2_storerhnew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerhnew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerhnew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, NtN)))

#define fGEN_TCG_S2_storerinew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerinew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN))

/* dczeroa clears the 32-byte cache line at the given address */
#define fGEN_TCG_Y2_dczeroa(SHORTCODE) SHORTCODE

/* These are not modelled in linux-user mode; suppress the compiler warning */
#define fGEN_TCG_Y2_dcinva(SHORTCODE) \
    do { RsV = RsV; } while (0)
#define fGEN_TCG_Y2_dccleaninva(SHORTCODE) \
    do { RsV = RsV; } while (0)
#define fGEN_TCG_Y2_dccleana(SHORTCODE) \
    do { RsV = RsV; } while (0)
#define fGEN_TCG_Y2_icinva(SHORTCODE) \
    do { RsV = RsV; } while (0)

/*
 * dealloc_return
 * Assembler mapped to
 * r31:30 = dealloc_return(r30):raw
 */
#define fGEN_TCG_L4_return(SHORTCODE) \
    gen_return(ctx, RddV, RsV)

/*
 * sub-instruction version (no RddV, so handle it manually)
 */
#define fGEN_TCG_SL2_return(SHORTCODE) \
    do { \
        TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); \
        gen_return(ctx, RddV, hex_gpr[HEX_REG_FP]); \
        gen_log_reg_write_pair(HEX_REG_FP, RddV); \
    } while (0)

/*
 * Conditional returns follow this naming convention
 *     _t                 predicate true
 *     _f                 predicate false
 *     _tnew_pt           predicate.new true predict taken
 *     _fnew_pt           predicate.new false predict taken
 *     _tnew_pnt          predicate.new true predict not taken
 *     _fnew_pnt          predicate.new false predict not taken
 * Predictions are not modelled in QEMU
 *
 * Example:
 *     if (p1) r31:30 = dealloc_return(r30):raw
 */
#define fGEN_TCG_L4_return_t(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_EQ)
#define fGEN_TCG_L4_return_f(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_NE)
#define fGEN_TCG_L4_return_tnew_pt(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
#define fGEN_TCG_L4_return_fnew_pt(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)
#define fGEN_TCG_L4_return_tnew_pnt(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
#define fGEN_TCG_L4_return_fnew_pnt(SHORTCODE) \
    gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)

#define fGEN_TCG_SL2_return_t(SHORTCODE) \
    gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_pred[0])
#define fGEN_TCG_SL2_return_f(SHORTCODE) \
    gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_pred[0])
#define fGEN_TCG_SL2_return_tnew(SHORTCODE) \
    gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_new_pred_value[0])
#define fGEN_TCG_SL2_return_fnew(SHORTCODE) \
    gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_new_pred_value[0])

/*
 * Mathematical operations with more than one definition require
 * special handling
 */
#define fGEN_TCG_A5_ACS(SHORTCODE) \
    do { \
        gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \
        gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV); \
    } while (0)

/*
 * Approximate reciprocal
 * r3,p1 = sfrecipa(r0, r1)
 *
 * The helper packs the two 32-bit results into a 64-bit value,
 * so unpack them into the proper results.
 */
#define fGEN_TCG_F2_sfrecipa(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV);  \
        tcg_gen_extrh_i64_i32(RdV, tmp); \
        tcg_gen_extrl_i64_i32(PeV, tmp); \
    } while (0)
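
/*
 * As the extract ops above show, the register result lives in the upper
 * 32 bits of the packed helper return value and the predicate result in
 * the lower 32 bits; sfinvsqrta below uses the same layout.
 */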

/*
 * Approximation of the reciprocal square root
 * r1,p0 = sfinvsqrta(r0)
 *
 * The helper packs the two 32-bit results into a 64-bit value,
 * so unpack them into the proper results.
 */
#define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \
        tcg_gen_extrh_i64_i32(RdV, tmp); \
        tcg_gen_extrl_i64_i32(PeV, tmp); \
    } while (0)

/*
 * Add or subtract with carry.
 * The predicate register is used as an extra input and output.
 * r5:4 = add(r1:0, r3:2, p1):carry
 */
#define fGEN_TCG_A4_addp_c(SHORTCODE) \
    do { \
        TCGv_i64 carry = tcg_temp_new_i64(); \
        TCGv_i64 zero = tcg_constant_i64(0); \
        tcg_gen_extu_i32_i64(carry, PxV); \
        tcg_gen_andi_i64(carry, carry, 1); \
        tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
        tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \
        tcg_gen_extrl_i64_i32(PxV, carry); \
        gen_8bitsof(PxV, PxV); \
    } while (0)
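
/*
 * A sketch of the data flow above: tcg_gen_add2_i64(lo, hi, al, ah, bl, bh)
 * produces the 128-bit sum (ah:al) + (bh:bl).  The first add2 adds the
 * carry-in bit to RssV and captures the carry-out; the second adds RttV
 * and accumulates any further carry, so "carry" ends up as the overall
 * carry-out (0 or 1), which gen_8bitsof then widens into the 0x00/0xff
 * predicate encoding.  A4_subp_c below follows the same pattern with
 * ~RttV, i.e. subtraction as addition of the one's complement plus carry.
 */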

/* r5:4 = sub(r1:0, r3:2, p1):carry */
#define fGEN_TCG_A4_subp_c(SHORTCODE) \
    do { \
        TCGv_i64 carry = tcg_temp_new_i64(); \
        TCGv_i64 zero = tcg_constant_i64(0); \
        TCGv_i64 not_RttV = tcg_temp_new_i64(); \
        tcg_gen_extu_i32_i64(carry, PxV); \
        tcg_gen_andi_i64(carry, carry, 1); \
        tcg_gen_not_i64(not_RttV, RttV); \
        tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
        tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \
        tcg_gen_extrl_i64_i32(PxV, carry); \
        gen_8bitsof(PxV, PxV); \
    } while (0)

/*
 * Compare each of the 8 unsigned bytes.
 * The minimum is placed in each byte of the destination.
 * Each bit of the predicate is set true if the byte from the first operand
 * is greater than the byte from the second operand.
 * r5:4,p1 = vminub(r1:0, r3:2)
 */
#define fGEN_TCG_A6_vminub_RdP(SHORTCODE) \
    do { \
        TCGv left = tcg_temp_new(); \
        TCGv right = tcg_temp_new(); \
        TCGv tmp = tcg_temp_new(); \
        tcg_gen_movi_tl(PeV, 0); \
        tcg_gen_movi_i64(RddV, 0); \
        for (int i = 0; i < 8; i++) { \
            gen_get_byte_i64(left, i, RttV, false); \
            gen_get_byte_i64(right, i, RssV, false); \
            tcg_gen_setcond_tl(TCG_COND_GT, tmp, left, right); \
            tcg_gen_deposit_tl(PeV, PeV, tmp, i, 1); \
            tcg_gen_umin_tl(tmp, left, right); \
            gen_set_byte_i64(i, RddV, tmp); \
        } \
    } while (0)

#define fGEN_TCG_J2_call(SHORTCODE) \
    gen_call(ctx, riV)
#define fGEN_TCG_J2_callr(SHORTCODE) \
    gen_callr(ctx, RsV)

#define fGEN_TCG_J2_callt(SHORTCODE) \
    gen_cond_call(ctx, PuV, TCG_COND_EQ, riV)
#define fGEN_TCG_J2_callf(SHORTCODE) \
    gen_cond_call(ctx, PuV, TCG_COND_NE, riV)
#define fGEN_TCG_J2_callrt(SHORTCODE) \
    gen_cond_callr(ctx, TCG_COND_EQ, PuV, RsV)
#define fGEN_TCG_J2_callrf(SHORTCODE) \
    gen_cond_callr(ctx, TCG_COND_NE, PuV, RsV)

#define fGEN_TCG_J2_endloop0(SHORTCODE) \
    gen_endloop0(ctx)
#define fGEN_TCG_J2_endloop1(SHORTCODE) \
    gen_endloop1(ctx)
#define fGEN_TCG_J2_endloop01(SHORTCODE) \
    gen_endloop01(ctx)

/*
 * Compound compare and jump instructions
 * Here is a primer to understand the tag names
 *
 * Comparison
 *      cmpeqi   compare equal to an immediate
 *      cmpgti   compare greater than an immediate
 *      cmpgtui  compare greater than an unsigned immediate
 *      cmpeqn1  compare equal to negative 1
 *      cmpgtn1  compare greater than negative 1
 *      cmpeq    compare equal (two registers)
 *      cmpgtu   compare greater than unsigned (two registers)
 *      tstbit0  test bit zero
 *
 * Condition
 *      tp0      p0 is true     p0 = cmp.eq(r0,#5); if (p0.new) jump:nt address
 *      fp0      p0 is false    p0 = cmp.eq(r0,#5); if (!p0.new) jump:nt address
 *      tp1      p1 is true     p1 = cmp.eq(r0,#5); if (p1.new) jump:nt address
 *      fp1      p1 is false    p1 = cmp.eq(r0,#5); if (!p1.new) jump:nt address
 *
 * Prediction (not modelled in QEMU)
 *      _nt      not taken
 *      _t       taken
 */
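/*
 * Putting these together, an illustrative decode (not generated code):
 *     J4_cmpeqi_fp0_jump_t    p0 = cmp.eq(Rs, #U); if (!p0.new) jump:t addr
 */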
#define fGEN_TCG_J4_cmpeq_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpgt_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpgtu_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpeqi_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpgti_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpgtui_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpeqn1_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)

#define fGEN_TCG_J4_cmpgtn1_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)

#define fGEN_TCG_J4_tstbit0_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)

#define fGEN_TCG_J2_jump(SHORTCODE) \
    gen_jump(ctx, riV)
#define fGEN_TCG_J2_jumpr(SHORTCODE) \
    gen_jumpr(ctx, RsV)
#define fGEN_TCG_J4_jumpseti(SHORTCODE) \
    do { \
        tcg_gen_movi_tl(RdV, UiV); \
        gen_jump(ctx, riV); \
    } while (0)

#define fGEN_TCG_cond_jumpt(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jump(ctx, TCG_COND_EQ, LSB, riV); \
    } while (0)
#define fGEN_TCG_cond_jumpf(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jump(ctx, TCG_COND_NE, LSB, riV); \
    } while (0)

#define fGEN_TCG_J2_jumpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumptpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumpf(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumpfpt(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumptnew(SHORTCODE) \
    gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
#define fGEN_TCG_J2_jumptnewpt(SHORTCODE) \
    gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
#define fGEN_TCG_J2_jumpfnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumpfnew(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprz(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprzpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprnz(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprnzpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprgtez(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprgtezpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprltez(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprltezpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))

#define fGEN_TCG_cond_jumprt(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jumpr(ctx, RsV, TCG_COND_EQ, LSB); \
    } while (0)
#define fGEN_TCG_cond_jumprf(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jumpr(ctx, RsV, TCG_COND_NE, LSB); \
    } while (0)

#define fGEN_TCG_J2_jumprt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprtpt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprf(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprfpt(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprtnew(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprtnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprfnew(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprfnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBNEW(PuN))

/*
 * New value compare & jump instructions
 * if ([!]COND(r0.new, r1)) jump:t address
 * if ([!]COND(r0.new, #7)) jump:t address
 */
#define fGEN_TCG_J4_cmpgt_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeq_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmplt_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeqi_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpgti_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpltu_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpgtui_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpgtu_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeqn1_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)

#define fGEN_TCG_J4_cmpgtn1_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)

#define fGEN_TCG_J4_tstbit0_t_jumpnv_t(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_t_jumpnv_nt(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_f_jumpnv_t(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_f_jumpnv_nt(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)

/* r0 = r1; jump address */
#define fGEN_TCG_J4_jumpsetr(SHORTCODE) \
    do { \
        tcg_gen_mov_tl(RdV, RsV); \
        gen_jump(ctx, riV); \
    } while (0)

#define fGEN_TCG_J2_pause(SHORTCODE) \
    do { \
        uiV = uiV; \
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->next_PC); \
    } while (0)

/* r0 = asr(r1, r2):sat */
#define fGEN_TCG_S2_asr_r_r_sat(SHORTCODE) \
    gen_asr_r_r_sat(ctx, RdV, RsV, RtV)

/* r0 = asl(r1, r2):sat */
#define fGEN_TCG_S2_asl_r_r_sat(SHORTCODE) \
    gen_asl_r_r_sat(ctx, RdV, RsV, RtV)

#define fGEN_TCG_SL2_jumpr31(SHORTCODE) \
    gen_jumpr(ctx, hex_gpr[HEX_REG_LR])

#define fGEN_TCG_SL2_jumpr31_t(SHORTCODE) \
    gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_pred[0])
#define fGEN_TCG_SL2_jumpr31_f(SHORTCODE) \
    gen_cond_jumpr31(ctx, TCG_COND_NE, hex_pred[0])

#define fGEN_TCG_SL2_jumpr31_tnew(SHORTCODE) \
    gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_new_pred_value[0])
#define fGEN_TCG_SL2_jumpr31_fnew(SHORTCODE) \
    gen_cond_jumpr31(ctx, TCG_COND_NE, hex_new_pred_value[0])

/* Count trailing zeros/ones */
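/*
 * The final argument to the tcg_gen_ctzi_* ops below is the value to
 * return when the (possibly inverted) input is zero, so counting the
 * trailing zeros of 0, or the trailing ones of an all-ones register,
 * yields the full register width.
 */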
#define fGEN_TCG_S2_ct0(SHORTCODE) \
    do { \
        tcg_gen_ctzi_tl(RdV, RsV, 32); \
    } while (0)
#define fGEN_TCG_S2_ct1(SHORTCODE) \
    do { \
        tcg_gen_not_tl(RdV, RsV); \
        tcg_gen_ctzi_tl(RdV, RdV, 32); \
    } while (0)
#define fGEN_TCG_S2_ct0p(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        tcg_gen_ctzi_i64(tmp, RssV, 64); \
        tcg_gen_extrl_i64_i32(RdV, tmp); \
    } while (0)
#define fGEN_TCG_S2_ct1p(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        tcg_gen_not_i64(tmp, RssV); \
        tcg_gen_ctzi_i64(tmp, tmp, 64); \
        tcg_gen_extrl_i64_i32(RdV, tmp); \
    } while (0)

/* Floating point */
#define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
    gen_helper_conv_sf2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2sf(SHORTCODE) \
    gen_helper_conv_df2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_uw2sf(SHORTCODE) \
    gen_helper_conv_uw2sf(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_uw2df(SHORTCODE) \
    gen_helper_conv_uw2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_w2sf(SHORTCODE) \
    gen_helper_conv_w2sf(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_w2df(SHORTCODE) \
    gen_helper_conv_w2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_ud2sf(SHORTCODE) \
    gen_helper_conv_ud2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_ud2df(SHORTCODE) \
    gen_helper_conv_ud2df(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_d2sf(SHORTCODE) \
    gen_helper_conv_d2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_d2df(SHORTCODE) \
    gen_helper_conv_d2df(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw(SHORTCODE) \
    gen_helper_conv_sf2uw(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2w(SHORTCODE) \
    gen_helper_conv_sf2w(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud(SHORTCODE) \
    gen_helper_conv_sf2ud(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2d(SHORTCODE) \
    gen_helper_conv_sf2d(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2uw(SHORTCODE) \
    gen_helper_conv_df2uw(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2w(SHORTCODE) \
    gen_helper_conv_df2w(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2ud(SHORTCODE) \
    gen_helper_conv_df2ud(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2d(SHORTCODE) \
    gen_helper_conv_df2d(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw_chop(SHORTCODE) \
    gen_helper_conv_sf2uw_chop(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2w_chop(SHORTCODE) \
    gen_helper_conv_sf2w_chop(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud_chop(SHORTCODE) \
    gen_helper_conv_sf2ud_chop(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2d_chop(SHORTCODE) \
    gen_helper_conv_sf2d_chop(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2uw_chop(SHORTCODE) \
    gen_helper_conv_df2uw_chop(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2w_chop(SHORTCODE) \
    gen_helper_conv_df2w_chop(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2ud_chop(SHORTCODE) \
    gen_helper_conv_df2ud_chop(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2d_chop(SHORTCODE) \
    gen_helper_conv_df2d_chop(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_sfadd(SHORTCODE) \
    gen_helper_sfadd(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfsub(SHORTCODE) \
    gen_helper_sfsub(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpeq(SHORTCODE) \
    gen_helper_sfcmpeq(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpgt(SHORTCODE) \
    gen_helper_sfcmpgt(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpge(SHORTCODE) \
    gen_helper_sfcmpge(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpuo(SHORTCODE) \
    gen_helper_sfcmpuo(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfmax(SHORTCODE) \
    gen_helper_sfmax(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfmin(SHORTCODE) \
    gen_helper_sfmin(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfclass(SHORTCODE) \
    do { \
        TCGv imm = tcg_constant_tl(uiV); \
        gen_helper_sfclass(PdV, cpu_env, RsV, imm); \
    } while (0)
#define fGEN_TCG_F2_sffixupn(SHORTCODE) \
    gen_helper_sffixupn(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupd(SHORTCODE) \
    gen_helper_sffixupd(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupr(SHORTCODE) \
    gen_helper_sffixupr(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_dfadd(SHORTCODE) \
    gen_helper_dfadd(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfsub(SHORTCODE) \
    gen_helper_dfsub(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmax(SHORTCODE) \
    gen_helper_dfmax(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmin(SHORTCODE) \
    gen_helper_dfmin(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpeq(SHORTCODE) \
    gen_helper_dfcmpeq(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpgt(SHORTCODE) \
    gen_helper_dfcmpgt(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpge(SHORTCODE) \
    gen_helper_dfcmpge(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpuo(SHORTCODE) \
    gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfclass(SHORTCODE) \
    do { \
        TCGv imm = tcg_constant_tl(uiV); \
        gen_helper_dfclass(PdV, cpu_env, RssV, imm); \
    } while (0)
#define fGEN_TCG_F2_sfmpy(SHORTCODE) \
    gen_helper_sfmpy(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffma(SHORTCODE) \
    gen_helper_sffma(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_sc(SHORTCODE) \
    gen_helper_sffma_sc(RxV, cpu_env, RxV, RsV, RtV, PuV)
#define fGEN_TCG_F2_sffms(SHORTCODE) \
    gen_helper_sffms(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_lib(SHORTCODE) \
    gen_helper_sffma_lib(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffms_lib(SHORTCODE) \
    gen_helper_sffms_lib(RxV, cpu_env, RxV, RsV, RtV)

#define fGEN_TCG_F2_dfmpyfix(SHORTCODE) \
    gen_helper_dfmpyfix(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmpyhh(SHORTCODE) \
    gen_helper_dfmpyhh(RxxV, cpu_env, RxxV, RssV, RttV)

/* Nothing to do for these in QEMU; just suppress the compiler warnings */
#define fGEN_TCG_Y4_l2fetch(SHORTCODE) \
    do { \
        RsV = RsV; \
        RtV = RtV; \
    } while (0)
#define fGEN_TCG_Y5_l2fetch(SHORTCODE) \
    do { \
        RsV = RsV; \
    } while (0)
#define fGEN_TCG_Y2_isync(SHORTCODE) \
    do { } while (0)
#define fGEN_TCG_Y2_barrier(SHORTCODE) \
    do { } while (0)
#define fGEN_TCG_Y2_syncht(SHORTCODE) \
    do { } while (0)
#define fGEN_TCG_Y2_dcfetchbo(SHORTCODE) \
    do { \
        RsV = RsV; \
        uiV = uiV; \
    } while (0)

#define fGEN_TCG_J2_trap0(SHORTCODE) \
    do { \
        uiV = uiV; \
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \
        TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \
        gen_helper_raise_exception(cpu_env, excp); \
    } while (0)
#endif