xref: /openbmc/qemu/target/hexagon/gen_tcg_hvx.h (revision e8d1e0cd)
1 /*
2  *  Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify
5  *  it under the terms of the GNU General Public License as published by
6  *  the Free Software Foundation; either version 2 of the License, or
7  *  (at your option) any later version.
8  *
9  *  This program is distributed in the hope that it will be useful,
10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  *  GNU General Public License for more details.
13  *
14  *  You should have received a copy of the GNU General Public License
15  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #ifndef HEXAGON_GEN_TCG_HVX_H
19 #define HEXAGON_GEN_TCG_HVX_H
20 
21 /*
22  * Histogram instructions
23  *
24  * Note that these instructions operate directly on the vector registers
25  * and therefore happen after commit.
26  *
27  * The generate_<tag> function is called twice
28  *     The first time is during the normal TCG generation
29  *         ctx->pre_commit is true
30  *         In the masked cases, we save the mask to the qtmp temporary
31  *         Otherwise, there is nothing to do
32  *     The second call is at the end of gen_commit_packet
33  *         ctx->pre_commit is false
34  *         Generate the call to the helper
35  */
36 
/*
 * Sanity check used by the histogram (vhist/vwhist*) generators:
 * the packet must have defined exactly one .tmp vector register,
 * since the helpers read their input from that .tmp register.
 */
static inline void assert_vhist_tmp(DisasContext *ctx)
{
    /* vhist instructions require exactly one .tmp to be defined */
    g_assert(ctx->tmp_vregs_idx == 1);
}
42 
/* Unmasked forms: nothing to do pre-commit; call the helper post-commit */
#define fGEN_TCG_V6_vhist(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vhist(cpu_env); \
    }
/* Masked forms: stash the QvV mask in qtmp on the pre-commit pass */
#define fGEN_TCG_V6_vhistq(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vhistq(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist256_sat(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist256q_sat(cpu_env); \
        } \
    } while (0)
#define fGEN_TCG_V6_vwhist128(SHORTCODE) \
    if (!ctx->pre_commit) { \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128(cpu_env); \
    }
#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128q(cpu_env); \
        } \
    } while (0)
/* The 128m forms additionally pass the uiV immediate to the helper */
#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \
    if (!ctx->pre_commit) { \
        TCGv tcgv_uiV = tcg_constant_tl(uiV); \
        assert_vhist_tmp(ctx); \
        gen_helper_vwhist128m(cpu_env, tcgv_uiV); \
    }
#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \
    do { \
        if (ctx->pre_commit) { \
            intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \
            tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \
                             sizeof(MMVector), sizeof(MMVector)); \
        } else { \
            TCGv tcgv_uiV = tcg_constant_tl(uiV); \
            assert_vhist_tmp(ctx); \
            gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \
        } \
    } while (0)
125 
126 
/* Straight vector copy: VdV = VuV */
#define fGEN_TCG_V6_vassign(SHORTCODE) \
    tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/*
 * Vector conditional move
 * Copy VuV into VdV only when the LSB of PsV equals PRED
 * (PRED == 1 for the true form, 0 for the false form);
 * otherwise branch over the move and leave VdV unchanged.
 */
#define fGEN_TCG_VEC_CMOV(PRED) \
    do { \
        TCGv lsb = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        tcg_gen_andi_tl(lsb, PsV, 1); \
        tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \
        tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        gen_set_label(false_label); \
    } while (0)


/* Vector conditional move (true) */
#define fGEN_TCG_V6_vcmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(1)

/* Vector conditional move (false) */
#define fGEN_TCG_V6_vncmov(SHORTCODE) \
    fGEN_TCG_VEC_CMOV(0)
151 
/*
 * Vector add - various forms
 * Lane width is selected by the MO_* size; the _dv forms operate on a
 * vector register pair, i.e. 2 * sizeof(MMVector) bytes.
 */
#define fGEN_TCG_V6_vaddb(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddh(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddw(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \
    tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
176 
/*
 * Vector sub - various forms
 * Lane width is selected by the MO_* size; the _dv forms operate on a
 * vector register pair, i.e. 2 * sizeof(MMVector) bytes.
 */
#define fGEN_TCG_V6_vsubb(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubh(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubw(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)

#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \
    tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \
                     sizeof(MMVector) * 2, sizeof(MMVector) * 2)
201 
/*
 * Vector shift right - various forms
 * The shift amount in RtV is masked down to (lane width - 1) bits
 * before the per-lane arithmetic shift.
 */
#define fGEN_TCG_V6_vasrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Shift-and-accumulate: stage the shifted result in vtmp, add into VxV */
#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vasrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
229 
/* Shift-and-accumulate: stage the shifted result in vtmp, add into VxV */
#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
240 
/* Logical (unsigned) shift right; shift amount masked to lane width - 1 */
#define fGEN_TCG_V6_vlsrb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vlsrw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
264 
/*
 * Vector shift left - various forms
 * The shift amount in RtV is masked down to (lane width - 1) bits.
 */
#define fGEN_TCG_V6_vaslb(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 7); \
        tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslh(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

/* Shift-and-accumulate: stage the shifted result in vtmp, add into VxV */
#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 15); \
        tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw(SHORTCODE) \
    do { \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
    } while (0)

#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        TCGv shift = tcg_temp_new(); \
        tcg_gen_andi_tl(shift, RtV, 31); \
        tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \
                          sizeof(MMVector), sizeof(MMVector)); \
        tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \
                         sizeof(MMVector), sizeof(MMVector)); \
    } while (0)
311 
/* Vector max - various forms (smax = signed lanes, umax = unsigned) */
#define fGEN_TCG_V6_vmaxw(SHORTCODE) \
    tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxh(SHORTCODE) \
    tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \
    tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxb(SHORTCODE) \
    tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vmaxub(SHORTCODE) \
    tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))

/* Vector min - various forms (smin = signed lanes, umin = unsigned) */
#define fGEN_TCG_V6_vminw(SHORTCODE) \
    tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminh(SHORTCODE) \
    tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminuh(SHORTCODE) \
    tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminb(SHORTCODE) \
    tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
#define fGEN_TCG_V6_vminub(SHORTCODE) \
    tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \
                      sizeof(MMVector), sizeof(MMVector))
345 
/* Vector logical ops (bitwise, so lane size is irrelevant; MO_64 used) */
#define fGEN_TCG_V6_vxor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vand(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vor(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, \
                    sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vnot(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

/* Q register logical ops (operate on MMQReg-sized predicate registers) */
#define fGEN_TCG_V6_pred_or(SHORTCODE) \
    tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \
                    sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and(SHORTCODE) \
    tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_xor(SHORTCODE) \
    tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

/* or/and with the complement of the second operand (orc/andc) */
#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \
    tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \
    tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \
                      sizeof(MMQReg), sizeof(MMQReg))

#define fGEN_TCG_V6_pred_not(SHORTCODE) \
    tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \
                     sizeof(MMQReg), sizeof(MMQReg))
387 
/*
 * Vector compares
 * Perform the per-lane compare into the vtmp staging vector, then
 * collapse each lane's result into a single predicate bit in QdV
 * via vec_to_qvec (SIZE is the lane width in bytes).
 */
#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, QdV_off, tmpoff); \
    } while (0)

/* Signed greater-than */
#define fGEN_TCG_V6_vgtw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4)
#define fGEN_TCG_V6_vgth(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2)
#define fGEN_TCG_V6_vgtb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1)

/* Unsigned greater-than */
#define fGEN_TCG_V6_vgtuw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4)
#define fGEN_TCG_V6_vgtuh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2)
#define fGEN_TCG_V6_vgtub(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1)

/* Equality */
#define fGEN_TCG_V6_veqw(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4)
#define fGEN_TCG_V6_veqh(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2)
#define fGEN_TCG_V6_veqb(SHORTCODE) \
    fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1)
417 
/*
 * Vector compare-and-combine
 * Like fGEN_TCG_VEC_CMP, but instead of writing QdV directly, the
 * predicate result is staged in qtmp and combined into the existing
 * QxV with OP (tcg_gen_gvec_and/or/xor).
 */
#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \
    do { \
        intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \
        intptr_t qoff = offsetof(CPUHexagonState, qtmp); \
        tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \
                         sizeof(MMVector), sizeof(MMVector)); \
        vec_to_qvec(SIZE, qoff, tmpoff); \
        OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \
    } while (0)

#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgth_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgth_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqw_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqw_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqh_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqh_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor)

#define fGEN_TCG_V6_veqb_and(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and)
#define fGEN_TCG_V6_veqb_or(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or)
#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \
    fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor)
490 
/* Vector splat - replicate the scalar RtV into every lane of VdV */
#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_32, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplath(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_16, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \
    tcg_gen_gvec_dup_i32(MO_8, VdV_off, \
                         sizeof(MMVector), sizeof(MMVector), RtV)

/* Vector absolute value - various forms */
#define fGEN_TCG_V6_vabsb(SHORTCODE) \
    tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsh(SHORTCODE) \
    tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))

#define fGEN_TCG_V6_vabsw(SHORTCODE) \
    tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \
                     sizeof(MMVector), sizeof(MMVector))
516 
/*
 * Vector loads
 * The SHORTCODE generated from the instruction semantics already emits
 * the correct TCG code, so these simply expand to it.
 */
#define fGEN_TCG_V6_vL32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE)               SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE)                SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE)            SHORTCODE
539 
/*
 * Predicated vector loads
 * GET_EA computes the effective address into EA, PRED evaluates the
 * predicate into LSB.  If the predicate is false (LSB == 0), both the
 * load and the address-register update INC are branched over.
 */
#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_load(ctx, DSTOFF, EA, true); \
        INC; \
        gen_set_label(false_label); \
    } while (0)

/* Post-increment (pi): EA = RxV, then RxV += siV * sizeof(MMVector) */
#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
/* Same, but with the predicate sense inverted (fLSBOLDNOT) */
#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_I(RxV, siV * sizeof(MMVector)))
563 
/* All pi-addressed variants share the same pred/npred load sequence */
#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_pi
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_pi
588 
/* Register+immediate (ai): EA = RtV + siV * sizeof(MMVector), no writeback */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_RI(RtV, siV * sizeof(MMVector)), \
                           VdV_off, \
                           do {} while (0))

/* All ai-addressed variants share the same pred/npred load sequence */
#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ai
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ai
624 
/* Register post-increment (ppu): EA = RxV, then RxV updated by MuV */
#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \
    fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \
                           fEA_REG(RxV), \
                           VdV_off, \
                           fPM_M(RxV, MuV))

/* All ppu-addressed variants share the same pred/npred load sequence */
#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_pred_ppu
#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_LOAD_npred_ppu
660 
/*
 * Vector stores
 * The SHORTCODE generated from the instruction semantics already emits
 * the correct TCG code, so these simply expand to it.
 */
#define fGEN_TCG_V6_vS32b_pi(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ai(SHORTCODE)                    SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE)                 SHORTCODE
#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE)                   SHORTCODE
#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE)                  SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE)                SHORTCODE

/*
 * New value vector stores
 * Compute the EA, store the new-value operand (OsN), then apply the
 * address-register update INC.
 */
#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \
    do { \
        GET_EA; \
        gen_vreg_store(ctx, EA, OsN_off, insn->slot, true); \
        INC; \
    } while (0)

/* Post-increment (pi) addressing */
#define fGEN_TCG_NEWVAL_VEC_STORE_pi \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector)))

#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi
#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_pi

/* Register+immediate (ai) addressing, no writeback */
#define fGEN_TCG_NEWVAL_VEC_STORE_ai \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \
                              do { } while (0))

#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai
#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ai

/* Register post-increment (ppu) addressing */
#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \
    fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV))

#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \
    fGEN_TCG_NEWVAL_VEC_STORE_ppu
704 
/*
 * Predicated vector stores
 * GET_EA computes the effective address into EA, PRED evaluates the
 * predicate into LSB.  If the predicate is false (LSB == 0), both the
 * store and the address-register update INC are branched over.
 * ALIGN selects an aligned (true) vs unaligned (false) store.
 */
#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        TCGLabel *false_label = gen_new_label(); \
        GET_EA; \
        PRED; \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \
        gen_vreg_store(ctx, EA, SRCOFF, insn->slot, ALIGN); \
        INC; \
        gen_set_label(false_label); \
    } while (0)

/* Post-increment (pi): EA = RxV, then RxV += siV * sizeof(MMVector) */
#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
/* New-value forms store the OsN operand and are always aligned */
#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_I(RxV, siV * sizeof(MMVector)))
738 
/*
 * Tie the post-increment predicated store instructions to the helper
 * macros above.  The "Ub" forms are unaligned (ALIGN == false); the
 * "nt" (non-temporal hint) forms expand identically to the plain
 * ones, so the hint has no effect here.
 */
#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(false)
#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(false)
#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_pi(true)
#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_pi(true)
#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_pi
#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_pi
759 
/*
 * Predicated vector stores, base + immediate offset:
 * EA = RtV + siV * vector size, with no address register update
 * (the INC argument is an empty do/while).
 *
 * "pred"/"npred" test the predicate bit or its complement; the "new"
 * variants store the new-value operand (OsN) and are always aligned.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            VsV_off, ALIGN, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_RI(RtV, siV * sizeof(MMVector)), \
                            OsN_off, true, \
                            do { } while (0))
780 
/*
 * Tie the base + immediate-offset predicated store instructions to
 * the helper macros above.  "Ub" forms are unaligned (ALIGN ==
 * false); "nt" forms expand identically to the plain ones.
 */
#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(false)
#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ai(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ai(true)
#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ai
#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ai
801 
/*
 * Predicated vector stores, post-modify by register:
 * EA = RxV, then RxV is updated from modifier register MuV.  The
 * update is inside the predicated region, so it is skipped when the
 * predicate fails.
 *
 * "pred"/"npred" test the predicate bit or its complement; the "new"
 * variants store the new-value operand (OsN) and are always aligned.
 */
#define fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            VsV_off, ALIGN, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \
    fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \
                            fEA_REG(RxV), \
                            OsN_off, true, \
                            fPM_M(RxV, MuV))
822 
/*
 * Tie the post-modify-by-register predicated store instructions to
 * the helper macros above.  "Ub" forms are unaligned (ALIGN ==
 * false); "nt" forms expand identically to the plain ones.
 */
#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(false)
#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(false)
#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_pred_ppu(true)
#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_npred_ppu(true)
#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_pred_ppu
#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \
    fGEN_TCG_PRED_VEC_STORE_new_npred_ppu
843 
/*
 * Masked vector stores
 *
 * The semantics string (SHORTCODE) from the instruction definitions
 * already generates the desired code, so these overrides simply
 * expand to SHORTCODE unchanged.
 */
#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE)              SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE)           SHORTCODE
#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE)             SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE)          SHORTCODE
#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE)            SHORTCODE
#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE)         SHORTCODE
857 
/*
 * Store release is not modelled in QEMU, but these overrides must
 * still reference the operand variables the translator declares, or
 * the compiler warns about unused variables.  A cast to void marks
 * each operand as used without the self-assignment idiom, which
 * triggers clang's -Wself-assign warning.
 */
#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \
    do { \
        (void)siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \
    do { \
        (void)RtV; \
        (void)siV; \
    } while (0)
#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \
    do { \
        (void)MuV; \
    } while (0)
872 
873 #endif
874