/*
 *  Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 
18 #ifndef HEXAGON_GEN_TCG_HVX_H
19 #define HEXAGON_GEN_TCG_HVX_H
20 
/*
 * Histogram instructions
 *
 * Note that these instructions operate directly on the vector registers
 * and therefore happen after commit.
 *
 * The generate_<tag> function is called twice
 *     The first time is during the normal TCG generation
 *         ctx->pre_commit is true
 *         In the masked cases, we save the mask to the qtmp temporary
 *         Otherwise, there is nothing to do
 *     The second call is at the end of gen_commit_packet
 *         ctx->pre_commit is false
 *         Generate the call to the helper
 */
36 
/* Sanity check shared by all of the histogram (vhist) generators below. */
static inline void assert_vhist_tmp(DisasContext *ctx)
{
    /* vhist instructions require exactly one .tmp to be defined */
    g_assert(ctx->tmp_vregs_idx == 1);
}
42 
/* Unmasked histogram: nothing pre-commit, helper call after commit. */
#define fGEN_TCG_V6_vhist(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vhist(cpu_env); \
    }
/* Masked histogram: stash the Qv mask in qtmp pre-commit for the helper. */
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vhistq(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256_sat(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q_sat(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128q(cpu_env); \
        } \
    } while (0)
/* The uiV immediate is forwarded to the helper as a TCG constant. */
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
    if (!ctx->pre_commit) { \
        TCGv tcgv_uiV = tcg_constant_tl(uiV); \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
    }
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            TCGv tcgv_uiV = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
        } \
    } while (0)
125 
126 
/* Vd = Vu: whole-vector register copy */
#define fGEN_TCG_V6_vassign(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
130 
/*
 * Vector conditional move
 * Test the LSB of scalar predicate PsV against PRED: on a match, copy
 * Vu to Vd; otherwise set this slot's bit in hex_slot_cancelled so the
 * commit logic skips the register write.
 */
#define fGEN_TCG_VEC_CMOV(PRED) \
    do { \
        TCGv lsb = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        tcg_gen_andi_tl(lsb, PsV, 1); \
        tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
        tcg_temp_free(lsb); \
        tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)
148 
149 
/* Vector conditional move (true): copy when PsV's LSB is set */
#define fGEN_TCG_V6_vcmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(1)

/* Vector conditional move (false): copy when PsV's LSB is clear */
#define fGEN_TCG_V6_vncmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(0)
157 
/*
 * Vector add - various forms
 * Vd = Vu + Vv with 8/16/32-bit lanes; the _dv forms operate on
 * double (pair) registers.  The SHORTCODE argument is unused: these
 * overrides replace the generated semantics entirely.
 */
#define fGEN_TCG_V6_vaddb(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddh(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddw(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
182 
/*
 * Vector sub - various forms
 * Vd = Vu - Vv with 8/16/32-bit lanes; the _dv forms operate on
 * double (pair) registers.
 */
#define fGEN_TCG_V6_vsubb(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubh(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubw(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
207 
/*
 * Vector shift right - various forms
 * Arithmetic (sign-extending) right shift by the scalar RtV, masked to
 * the lane width (15 for halfwords, 31 for words).  The _acc forms
 * stage the shifted result in vtmp and accumulate it into Vx.
 */
#define fGEN_TCG_V6_vasrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vasrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)
238 
/* Vx += (Vu >> (Rt & 31)) arithmetic, 32-bit lanes; staged through vtmp */
#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)
250 
/*
 * Logical (zero-extending) right shift by the scalar RtV, masked to the
 * lane width (7 for bytes, 15 for halfwords, 31 for words).
 */
#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)
277 
/*
 * Vector shift left - various forms
 * Left shift by the scalar RtV, masked to the lane width.  The _acc
 * forms stage the shifted result in vtmp and accumulate it into Vx.
 */
#define fGEN_TCG_V6_vaslb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vaslh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vaslw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)

#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
        tcg_temp_free(shift); \
    } while (0)
329 
/* Vector max - various forms (smax = signed lanes, umax = unsigned) */
#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
    tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
    tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
    tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
    tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
    tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
346 
/* Vector min - various forms (smin = signed lanes, umin = unsigned) */
#define fGEN_TCG_V6_vminw(SHORTCODE) \
    tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminh(SHORTCODE) \
    tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminuh(SHORTCODE) \
    tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminb(SHORTCODE) \
    tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminub(SHORTCODE) \
    tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
363 
/* Vector logical ops (bitwise, so the element size is arbitrary) */
#define fGEN_TCG_V6_vxor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vand(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vor(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \
                    sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vnot(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
380 
/*
 * Q register logical ops
 * Predicate registers are combined bitwise; the _n forms operate on the
 * complement of Qt (orc = or-complement, andc = and-complement).
 */
#define fGEN_TCG_V6_pred_or(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
                    sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
    tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
    tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
                      sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_not(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))
405 
/*
 * Vector compares
 * Element-wise compare of Vu and Vv into vtmp (each lane becomes all
 * ones or all zeros), then vec_to_qvec narrows the lane-wide results
 * into the Qd predicate register.
 */
#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, QdV_off, tmpoff); \
    } while (0)

#define fGEN_TCG_V6_vgtw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
#define fGEN_TCG_V6_vgth(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
#define fGEN_TCG_V6_vgtb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)

#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
#define fGEN_TCG_V6_vgtub(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)

#define fGEN_TCG_V6_veqw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
#define fGEN_TCG_V6_veqh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
#define fGEN_TCG_V6_veqb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)
435 
/*
 * Compare-and-accumulate: like fGEN_TCG_VEC_CMP, but the predicate form
 * of the compare result (staged in qtmp) is folded into QxV with OP
 * (one of tcg_gen_gvec_and/or/xor).
 */
#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, qoff, tmpoff); \
        OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
    } while (0)
445 
/* Signed/unsigned GT and EQ compares, folded into Qx with and/or/xor */
#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)
508 
/* Vector splat - replicate scalar RtV into every lane of Vd */
#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)
521 
/* Vector absolute value - per-lane |Vu| for 8/16/32-bit lanes */
#define fGEN_TCG_V6_vabsb(SHORTCODE) \
    tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsh(SHORTCODE) \
    tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsw(SHORTCODE) \
    tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
534 
/*
 * Vector loads
 * The generated shortcode semantics are already suitable, so these
 * overrides simply pass SHORTCODE through unchanged.
 */
#define fGEN_TCG_V6_vL32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE)            SHORTCODE
557 
/*
 * Predicated vector loads
 * Compute EA (GET_EA) and evaluate the predicate (PRED sets LSB).
 * If the predicate bit is clear, mark this slot cancelled in
 * hex_slot_cancelled; otherwise load the vector at EA into DSTOFF
 * and apply the pointer update INC.
 */
#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        tcg_temp_free(LSB); \
        gen_vreg_load(ctx, DSTOFF, EA, true); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)
576 
/*
 * Post-increment (pi) addressing: EA is taken from RxV, which is then
 * advanced by siV * sizeof(MMVector).  pred/npred select the true or
 * complemented predicate LSB.
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
612 
/*
 * Register + immediate-offset (ai) addressing: EA = RtV + scaled siV;
 * no pointer update, so INC is an empty statement.
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))

#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
648 
/*
 * Post-increment by modifier register (ppu) addressing: EA is taken
 * from RxV, which is then advanced by MuV.
 */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
684 
/*
 * Vector stores
 * Like the plain loads, the generated shortcode is already suitable,
 * so it is passed through unchanged.
 */
#define fGEN_TCG_V6_vS32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE)                SHORTCODE
695 
/*
 * New value vector stores
 * Store the "new" vector operand (OsN) to EA, then apply the pointer
 * update INC.  Addressing modes mirror the predicated loads:
 * pi = post-increment by immediate, ai = register + immediate offset
 * (empty INC), ppu = post-increment by modifier register.
 */
#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
    do { \
        GET_EA; \
        gen_vreg_store(ctx, EA, OsN_off, insn->slot, true); \
        INC; \
    } while (0)

#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi
#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi

#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
                              do { } while (0))

#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai
#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai

#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
728 
/*
 * Predicated vector stores
 * Same shape as fGEN_TCG_PRED_VEC_LOAD: compute EA, test the predicate
 * LSB, and either store SRCOFF at EA (ALIGN is forwarded to
 * gen_vreg_store) and apply INC, or mark the slot cancelled.
 */
#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        TCGLabel *end_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        tcg_temp_free(LSB); \
        gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
        INC; \
        tcg_gen_br(end_label); \
        gen_set_label(false_label); \
        tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \
                       1 << insn->slot); \
        gen_set_label(end_label); \
    } while (0)
747 
/*
 * Post-increment-by-immediate forms.
 *     pred / npred           store VsV when the predicate is true / false
 *                            (fLSBOLD vs fLSBOLDNOT)
 *     new_pred / new_npred   store the newly produced value (OsN),
 *                            always aligned
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
768 
/*
 * Instruction mappings for the post-increment forms.  The non-temporal
 * (_nt_) variants expand to the same code as the plain ones.
 */
#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
789 
/*
 * Base + immediate-offset forms: no address-register update, so the INC
 * argument is an empty do-while.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
810 
/*
 * Instruction mappings for the base + immediate-offset forms.  The
 * non-temporal (_nt_) variants expand to the same code as the plain ones.
 */
#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
831 
/*
 * Post-increment-by-modifier-register (Mu) forms.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
852 
/*
 * Instruction mappings for the modifier-register forms.  The non-temporal
 * (_nt_) variants expand to the same code as the plain ones.
 */
#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
873 
/* Masked vector stores */

/*
 * Masked (vector-predicate Q register) stores: the semantics SHORTCODE
 * needs no extra wrapping here, so each macro expands to it unchanged.
 */
#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE)         SHORTCODE
887 
/* Store release not modelled in qemu, but need to suppress compiler warnings */
/* A (void) cast marks the operand as intentionally unused; unlike the
 * x = x self-assignment it cannot trip clang's -Wself-assign. */
#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
    do { \
        (void)siV; \
    } while (0)
/* Store release is a no-op here; (void) casts silence unused-operand
 * warnings without risking clang's -Wself-assign (unlike x = x). */
#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
    do { \
        (void)RtV; \
        (void)siV; \
    } while (0)
/* Store release is a no-op here; the (void) cast silences unused-operand
 * warnings without risking clang's -Wself-assign (unlike x = x). */
#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \
    do { \
        (void)MuV; \
    } while (0)
902 
903 #endif
904