1/*
2 * QEMU TCG support -- s390x vector instruction translation functions
3 *
4 * Copyright (C) 2019 Red Hat Inc
5 *
6 * Authors:
7 *   David Hildenbrand <david@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 */
12
13/*
14 * For most instructions that use the same element size for reads and
 * writes, we can use real gvec vector expansion, which potentially uses
16 * real host vector instructions. As they only work up to 64 bit elements,
17 * 128 bit elements (vector is a single element) have to be handled
18 * differently. Operations that are too complicated to encode via TCG ops
19 * are handled via gvec ool (out-of-line) handlers.
20 *
21 * As soon as instructions use different element sizes for reads and writes
22 * or access elements "out of their element scope" we expand them manually
23 * in fancy loops, as gvec expansion does not deal with actual element
24 * numbers and does also not support access to other elements.
25 *
26 * 128 bit elements:
27 *  As we only have i32/i64, such elements have to be loaded into two
28 *  i64 values and can then be processed e.g. by tcg_gen_add2_i64.
29 *
30 * Sizes:
31 *  On s390x, the operand size (oprsz) and the maximum size (maxsz) are
32 *  always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
33 *  a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
34 *  128 bit element size has to be treated in a special way (MO_64 + 1).
35 *  We will use ES_* instead of MO_* for this reason in this file.
36 *
37 * CC handling:
38 *  As gvec ool-helpers can currently not return values (besides via
39 *  pointers like vectors or cpu_env), whenever we have to set the CC and
40 *  can't conclude the value from the result vector, we will directly
41 *  set it in "env->cc_op" and mark it as static via set_cc_static()".
42 *  Whenever this is done, the helper writes globals (cc_op).
43 */
44
/* Number of bytes one element of size "es" occupies (es maps to MO_8...). */
#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
/* Number of elements of size "es" in a 16-byte (128-bit) vector register. */
#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
/* Number of bits per element of size "es". */
#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)

/* Element sizes: identical to MO_* for 8..64 bit; 128 bit is special. */
#define ES_8    MO_8
#define ES_16   MO_16
#define ES_32   MO_32
#define ES_64   MO_64
#define ES_128  4

/* Floating-Point Format */
#define FPF_SHORT       2
#define FPF_LONG        3
#define FPF_EXT         4
59
60static inline bool valid_vec_element(uint8_t enr, MemOp es)
61{
62    return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
63}
64
/*
 * Load vector element @enr of register @reg into @dst, zero- or
 * sign-extending sub-64-bit elements according to MO_SIGN in @memop.
 */
static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
                                 MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    /*
     * NOTE(review): the cast presumably silences enum-switch warnings, as
     * the MO_SIGN-combined labels are not members of MemOp -- confirm.
     */
    switch ((unsigned)memop) {
    case ES_8:
        tcg_gen_ld8u_i64(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(dst, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i64(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i64(dst, cpu_env, offs);
        break;
    case ES_32 | MO_SIGN:
        tcg_gen_ld32s_i64(dst, cpu_env, offs);
        break;
    case ES_64:
    case ES_64 | MO_SIGN:
        /* full 64 bit: no extension needed */
        tcg_gen_ld_i64(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
97
98static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
99                                 MemOp memop)
100{
101    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
102
103    switch (memop) {
104    case ES_8:
105        tcg_gen_ld8u_i32(dst, cpu_env, offs);
106        break;
107    case ES_16:
108        tcg_gen_ld16u_i32(dst, cpu_env, offs);
109        break;
110    case ES_8 | MO_SIGN:
111        tcg_gen_ld8s_i32(dst, cpu_env, offs);
112        break;
113    case ES_16 | MO_SIGN:
114        tcg_gen_ld16s_i32(dst, cpu_env, offs);
115        break;
116    case ES_32:
117    case ES_32 | MO_SIGN:
118        tcg_gen_ld_i32(dst, cpu_env, offs);
119        break;
120    default:
121        g_assert_not_reached();
122    }
123}
124
/*
 * Store the lower @memop-sized part of @src into vector element @enr of
 * register @reg.
 */
static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i64(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i64(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st32_i64(src, cpu_env, offs);
        break;
    case ES_64:
        tcg_gen_st_i64(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
147
/*
 * Store the lower @memop-sized part of @src into vector element @enr of
 * register @reg.
 */
static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i32(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i32(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st_i32(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
167
/*
 * Compute at runtime a host pointer to element @enr (an i64 value, masked
 * to the valid range) of size @es in vector register @reg.
 */
static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
                                    uint8_t es)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* mask off invalid parts from the element nr */
    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);

    /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
    tcg_gen_shli_i64(tmp, tmp, es);
#ifndef HOST_WORDS_BIGENDIAN
    /* on little-endian hosts, adjust the offset within each doubleword */
    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));

    /* generate the final ptr by adding cpu_env */
    tcg_gen_trunc_i64_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);

    tcg_temp_free_i64(tmp);
}
189
/*
 * Convenience wrappers around the generic gvec expanders: on s390x, the
 * operand size (oprsz) and maximum size (maxsz) are always 16 bytes.
 */
#define gen_gvec_2(v1, v2, gen) \
    tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   16, 16, gen)
#define gen_gvec_2s(v1, v2, c, gen) \
    tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    16, 16, c, gen)
#define gen_gvec_2_ool(v1, v2, data, fn) \
    tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       16, 16, data, fn)
#define gen_gvec_2i_ool(v1, v2, c, data, fn) \
    tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                        c, 16, 16, data, fn)
#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_3(v1, v2, v3, gen) \
    tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), 16, 16, gen)
#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
    tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), 16, 16, data, fn)
#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
#define gen_gvec_3i(v1, v2, v3, c, gen) \
    tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    vec_full_reg_offset(v3), 16, 16, c, gen)
#define gen_gvec_4(v1, v2, v3, v4, gen) \
    tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                   16, 16, gen)
#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
    tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       16, 16, data, fn)
#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_dup_i64(es, v1, c) \
    tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_mov(v1, v2) \
    tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
                     16)
/* no trailing semicolon: callers terminate the statement themselves */
#define gen_gvec_dup_imm(es, v1, c) \
    tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_fn_2(fn, es, v1, v2) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      16, 16)
#define gen_gvec_fn_2i(fn, es, v1, v2, c) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      c, 16, 16)
#define gen_gvec_fn_2s(fn, es, v1, v2, s) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      s, 16, 16)
#define gen_gvec_fn_3(fn, es, v1, v2, v3) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), 16, 16)
#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)
251
252/*
253 * Helper to carry out a 128 bit vector computation using 2 i64 values per
254 * vector.
255 */
256typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
257                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
258static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
259                              uint8_t b)
260{
261        TCGv_i64 dh = tcg_temp_new_i64();
262        TCGv_i64 dl = tcg_temp_new_i64();
263        TCGv_i64 ah = tcg_temp_new_i64();
264        TCGv_i64 al = tcg_temp_new_i64();
265        TCGv_i64 bh = tcg_temp_new_i64();
266        TCGv_i64 bl = tcg_temp_new_i64();
267
268        read_vec_element_i64(ah, a, 0, ES_64);
269        read_vec_element_i64(al, a, 1, ES_64);
270        read_vec_element_i64(bh, b, 0, ES_64);
271        read_vec_element_i64(bl, b, 1, ES_64);
272        fn(dl, dh, al, ah, bl, bh);
273        write_vec_element_i64(dh, d, 0, ES_64);
274        write_vec_element_i64(dl, d, 1, ES_64);
275
276        tcg_temp_free_i64(dh);
277        tcg_temp_free_i64(dl);
278        tcg_temp_free_i64(ah);
279        tcg_temp_free_i64(al);
280        tcg_temp_free_i64(bh);
281        tcg_temp_free_i64(bl);
282}
283
284typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
285                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
286                                     TCGv_i64 cl, TCGv_i64 ch);
287static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
288                              uint8_t b, uint8_t c)
289{
290        TCGv_i64 dh = tcg_temp_new_i64();
291        TCGv_i64 dl = tcg_temp_new_i64();
292        TCGv_i64 ah = tcg_temp_new_i64();
293        TCGv_i64 al = tcg_temp_new_i64();
294        TCGv_i64 bh = tcg_temp_new_i64();
295        TCGv_i64 bl = tcg_temp_new_i64();
296        TCGv_i64 ch = tcg_temp_new_i64();
297        TCGv_i64 cl = tcg_temp_new_i64();
298
299        read_vec_element_i64(ah, a, 0, ES_64);
300        read_vec_element_i64(al, a, 1, ES_64);
301        read_vec_element_i64(bh, b, 0, ES_64);
302        read_vec_element_i64(bl, b, 1, ES_64);
303        read_vec_element_i64(ch, c, 0, ES_64);
304        read_vec_element_i64(cl, c, 1, ES_64);
305        fn(dl, dh, al, ah, bl, bh, cl, ch);
306        write_vec_element_i64(dh, d, 0, ES_64);
307        write_vec_element_i64(dl, d, 1, ES_64);
308
309        tcg_temp_free_i64(dh);
310        tcg_temp_free_i64(dl);
311        tcg_temp_free_i64(ah);
312        tcg_temp_free_i64(al);
313        tcg_temp_free_i64(bh);
314        tcg_temp_free_i64(bl);
315        tcg_temp_free_i64(ch);
316        tcg_temp_free_i64(cl);
317}
318
319static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
320                          uint64_t b)
321{
322    TCGv_i64 bl = tcg_const_i64(b);
323    TCGv_i64 bh = tcg_const_i64(0);
324
325    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
326    tcg_temp_free_i64(bl);
327    tcg_temp_free_i64(bh);
328}
329
/* Bit permute: fully handled by an out-of-line helper. */
static DisasJumpType op_vbperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_3_ool(get_field(s, v1), get_field(s, v2), get_field(s, v3), 0,
                   gen_helper_gvec_vbperm);

    return DISAS_NEXT;
}
337
/*
 * Gather element: add element @enr of v2 to the base address, load an
 * element of size @es from that address and store it into element @enr
 * of v1.
 */
static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    /* the element number must exist for the given element size */
    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    /* wrap the address according to the current addressing mode */
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
359
360static uint64_t generate_byte_mask(uint8_t mask)
361{
362    uint64_t r = 0;
363    int i;
364
365    for (i = 0; i < 8; i++) {
366        if ((mask >> i) & 1) {
367            r |= 0xffull << (i * 8);
368        }
369    }
370    return r;
371}
372
/*
 * Generate byte mask: each of the 16 bits of i2 selects one byte of the
 * result vector to be set to all-ones (leftmost bit -> leftmost byte).
 */
static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
{
    const uint16_t i2 = get_field(s, i2);

    if (i2 == (i2 & 0xff) * 0x0101) {
        /*
         * Masks for both 64 bit elements of the vector are the same.
         * Trust tcg to produce a good constant loading.
         */
        gen_gvec_dup_imm(ES_64, get_field(s, v1),
                         generate_byte_mask(i2 & 0xff));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();

        /* upper byte of i2 -> leftmost doubleword, lower byte -> rightmost */
        tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
        write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
        tcg_gen_movi_i64(t, generate_byte_mask(i2));
        write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
        tcg_temp_free_i64(t);
    }
    return DISAS_NEXT;
}
395
/*
 * Generate mask: set the bits from position i2 to position i3 (inclusive,
 * numbered from the left, wrapping around) in every element of v1.
 */
static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
    const uint8_t i2 = get_field(s, i2) & (bits - 1);
    const uint8_t i3 = get_field(s, i3) & (bits - 1);
    uint64_t mask = 0;
    int i;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* generate the mask - take care of wrapping */
    for (i = i2; ; i = (i + 1) % bits) {
        /* bit 0 is the leftmost (most significant) bit of an element */
        mask |= 1ull << (bits - i - 1);
        if (i == i3) {
            break;
        }
    }

    gen_gvec_dup_imm(es, get_field(s, v1), mask);
    return DISAS_NEXT;
}
421
422static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
423{
424    TCGv_i64 t0 = tcg_temp_new_i64();
425    TCGv_i64 t1 = tcg_temp_new_i64();
426
427    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
428    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
429    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
430    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
431    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
432    tcg_temp_free(t0);
433    tcg_temp_free(t1);
434    return DISAS_NEXT;
435}
436
/* Vector register-to-register copy. */
static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
{
    gen_gvec_mov(get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}
442
/*
 * Load and replicate: load one element of size @es from memory and
 * duplicate it into every element of v1.
 */
static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_i64(es, get_field(s, v1), tmp);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
459
/*
 * Load element: load one element of size @es from memory into element
 * @enr of v1; the other elements are left unchanged.
 */
static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
477
/*
 * Load element immediate: store the sign-extended 16-bit immediate i2
 * into element @enr of v1; the other elements are left unchanged.
 */
static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* the immediate is sign-extended to the element size */
    tmp = tcg_const_i64((int16_t)get_field(s, i2));
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
494
/*
 * Load GR from VR element: read the element of v3 selected by the
 * effective address d2(b2) into the output GPR, zero-extended.
 */
static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        /* element number is a constant: mask it like the slow path would */
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        read_vec_element_i64(o->out, get_field(s, v3), enr, es);
        return DISAS_NEXT;
    }

    /* slow path: compute a host pointer to the element at runtime */
    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_ld8u_i64(o->out, ptr, 0);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(o->out, ptr, 0);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(o->out, ptr, 0);
        break;
    case ES_64:
        tcg_gen_ld_i64(o->out, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}
535
/*
 * Load logical element and zero: zero the whole vector, then load one
 * element from memory into a fixed sub-element of the leftmost
 * doubleword (which one depends on the element size).
 */
static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    uint8_t enr;
    TCGv_i64 t;

    switch (es) {
    /* rightmost sub-element of leftmost doubleword */
    case ES_8:
        enr = 7;
        break;
    case ES_16:
        enr = 3;
        break;
    case ES_32:
        enr = 1;
        break;
    case ES_64:
        enr = 0;
        break;
    /* leftmost sub-element of leftmost doubleword */
    case 6:
        /* m3 == 6 is only valid with the vector-enhancements facility */
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            es = ES_32;
            enr = 0;
            break;
        }
        /* fallthrough */
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
    /* zero the destination first, then place the loaded element */
    gen_gvec_dup_imm(es, get_field(s, v1), 0);
    write_vec_element_i64(t, get_field(s, v1), enr, es);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
576
/*
 * Load multiple vector registers v1..v3 (inclusive) from consecutive
 * memory, 8 bytes at a time.
 */
static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s, v3);
    uint8_t v1 = get_field(s, v1);
    TCGv_i64 t0, t1;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * Check for possible access exceptions up front by loading the last
     * doubleword first; the loop below then starts with the first one.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);

    for (;; v1++) {
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 0, ES_64);
        if (v1 == v3) {
            /* the second half of the last register is t0, stored below */
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 1, ES_64);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }

    /* Store the last element, loaded first */
    write_vec_element_i64(t0, v1, 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
616
/*
 * Load to block boundary: load at most 16 bytes into v1, stopping at the
 * next block boundary of size 2^(m3 + 6) bytes; handled by the vll
 * helper with a computed length.
 */
static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0;
    TCGv_i64 bytes;

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    bytes = tcg_temp_new_i64();
    a0 = tcg_temp_new_ptr();
    /* calculate the number of bytes until the next block boundary */
    /* addr | -block_size == (addr % block_size) - block_size; negate it */
    tcg_gen_ori_i64(bytes, o->addr1, -block_size);
    tcg_gen_neg_i64(bytes, bytes);

    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, bytes);
    tcg_temp_free_i64(bytes);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
641
/*
 * Load VR element from GR: store the input GPR into the element of v1
 * selected by the effective address d2(b2).
 */
static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        /* element number is a constant: mask it like the slow path would */
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        write_vec_element_i64(o->in2, get_field(s, v1), enr, es);
        return DISAS_NEXT;
    }

    /* slow path: compute a host pointer to the element at runtime */
    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_st8_i64(o->in2, ptr, 0);
        break;
    case ES_16:
        tcg_gen_st16_i64(o->in2, ptr, 0);
        break;
    case ES_32:
        tcg_gen_st32_i64(o->in2, ptr, 0);
        break;
    case ES_64:
        tcg_gen_st_i64(o->in2, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}
682
/* Load VR from GRs disjoint: in1 -> leftmost, in2 -> rightmost doubleword. */
static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
{
    write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64);
    return DISAS_NEXT;
}
689
/*
 * Load with length: load in2 + 1 bytes (in2 holds the highest byte
 * index) into v1 via the vll helper.
 */
static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
702
/*
 * Merge: interleave elements of v2 and v3 into v1 (even destination
 * elements from v2, odd ones from v3). op2 == 0x61 takes the leftmost
 * halves of the sources, otherwise the rightmost halves. Since v1 may
 * overlap v2/v3, the iteration direction is chosen so that a source
 * element is always read before it can be overwritten.
 */
static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0x61) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
            src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
744
745static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
746{
747    const uint8_t v1 = get_field(s, v1);
748    const uint8_t v2 = get_field(s, v2);
749    const uint8_t v3 = get_field(s, v3);
750    const uint8_t es = get_field(s, m4);
751    static gen_helper_gvec_3 * const vpk[3] = {
752        gen_helper_gvec_vpk16,
753        gen_helper_gvec_vpk32,
754        gen_helper_gvec_vpk64,
755    };
756     static gen_helper_gvec_3 * const vpks[3] = {
757        gen_helper_gvec_vpks16,
758        gen_helper_gvec_vpks32,
759        gen_helper_gvec_vpks64,
760    };
761    static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
762        gen_helper_gvec_vpks_cc16,
763        gen_helper_gvec_vpks_cc32,
764        gen_helper_gvec_vpks_cc64,
765    };
766    static gen_helper_gvec_3 * const vpkls[3] = {
767        gen_helper_gvec_vpkls16,
768        gen_helper_gvec_vpkls32,
769        gen_helper_gvec_vpkls64,
770    };
771    static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
772        gen_helper_gvec_vpkls_cc16,
773        gen_helper_gvec_vpkls_cc32,
774        gen_helper_gvec_vpkls_cc64,
775    };
776
777    if (es == ES_8 || es > ES_64) {
778        gen_program_exception(s, PGM_SPECIFICATION);
779        return DISAS_NORETURN;
780    }
781
782    switch (s->fields.op2) {
783    case 0x97:
784        if (get_field(s, m5) & 0x1) {
785            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
786            set_cc_static(s);
787        } else {
788            gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
789        }
790        break;
791    case 0x95:
792        if (get_field(s, m5) & 0x1) {
793            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
794            set_cc_static(s);
795        } else {
796            gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
797        }
798        break;
799    case 0x94:
800        /* If sources and destination dont't overlap -> fast path */
801        if (v1 != v2 && v1 != v3) {
802            const uint8_t src_es = get_field(s, m4);
803            const uint8_t dst_es = src_es - 1;
804            TCGv_i64 tmp = tcg_temp_new_i64();
805            int dst_idx, src_idx;
806
807            for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
808                src_idx = dst_idx;
809                if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
810                    read_vec_element_i64(tmp, v2, src_idx, src_es);
811                } else {
812                    src_idx -= NUM_VEC_ELEMENTS(src_es);
813                    read_vec_element_i64(tmp, v3, src_idx, src_es);
814                }
815                write_vec_element_i64(tmp, v1, dst_idx, dst_es);
816            }
817            tcg_temp_free_i64(tmp);
818        } else {
819            gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
820        }
821        break;
822    default:
823        g_assert_not_reached();
824    }
825    return DISAS_NEXT;
826}
827
/* Permute: fully handled by an out-of-line helper. */
static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4),
                   0, gen_helper_gvec_vperm);
    return DISAS_NEXT;
}
835
/*
 * Permute doubleword immediate: two bits of m4 select one doubleword of
 * v2 (bit 2) and one of v3 (bit 0) to form the result.
 */
static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
{
    const uint8_t i2 = extract32(get_field(s, m4), 2, 1);
    const uint8_t i3 = extract32(get_field(s, m4), 0, 1);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    read_vec_element_i64(t0, get_field(s, v2), i2, ES_64);
    read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
851
/* Replicate: duplicate element @enr of v3 into every element of v1. */
static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
{
    const uint8_t enr = get_field(s, i2);
    const uint8_t es = get_field(s, m4);

    if (es > ES_64 || !valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)),
                         vec_reg_offset(get_field(s, v3), enr, es),
                         16, 16);
    return DISAS_NEXT;
}
867
/*
 * Replicate immediate: duplicate the sign-extended 16-bit immediate i2
 * into every element of v1.
 */
static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
{
    const int64_t data = (int16_t)get_field(s, i2);
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_dup_imm(es, get_field(s, v1), data);
    return DISAS_NEXT;
}
881
/*
 * Scatter element: add element @enr of v2 to the base address and store
 * element @enr of v1 (size @es) to that address.
 */
static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    /* wrap the address according to the current addressing mode */
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
903
/* Select: bitwise v1 = (v2 & v4) | (v3 & ~v4), via the gvec bitsel op. */
static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1),
                  get_field(s, v4), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}
911
/*
 * Sign extend to doubleword: sign-extend the rightmost element of each
 * doubleword of v2 into the corresponding doubleword of v1.
 */
static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    int idx1, idx2;
    TCGv_i64 tmp;

    /* rightmost element of the first resp. second doubleword */
    switch (es) {
    case ES_8:
        idx1 = 7;
        idx2 = 15;
        break;
    case ES_16:
        idx1 = 3;
        idx2 = 7;
        break;
    case ES_32:
        idx1 = 1;
        idx2 = 3;
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
944
/*
 * Store the full 128-bit vector v1 to memory at o->addr1, as two
 * target-endian 64-bit stores.
 */
static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_const_i64(16);

    /* Probe write access before actually modifying memory */
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    /* tmp (initially the probe length 16) is reused as the data temporary */
    read_vec_element_i64(tmp,  get_field(s, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    read_vec_element_i64(tmp,  get_field(s, v1), 1, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
960
961static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
962{
963    const uint8_t es = s->insn->data;
964    const uint8_t enr = get_field(s, m3);
965    TCGv_i64 tmp;
966
967    if (!valid_vec_element(enr, es)) {
968        gen_program_exception(s, PGM_SPECIFICATION);
969        return DISAS_NORETURN;
970    }
971
972    tmp = tcg_temp_new_i64();
973    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
974    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
975    tcg_temp_free_i64(tmp);
976    return DISAS_NEXT;
977}
978
979static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
980{
981    const uint8_t v3 = get_field(s, v3);
982    uint8_t v1 = get_field(s, v1);
983    TCGv_i64 tmp;
984
985    while (v3 < v1 || (v3 - v1 + 1) > 16) {
986        gen_program_exception(s, PGM_SPECIFICATION);
987        return DISAS_NORETURN;
988    }
989
990    /* Probe write access before actually modifying memory */
991    tmp = tcg_const_i64((v3 - v1 + 1) * 16);
992    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
993
994    for (;; v1++) {
995        read_vec_element_i64(tmp, v1, 0, ES_64);
996        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
997        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
998        read_vec_element_i64(tmp, v1, 1, ES_64);
999        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
1000        if (v1 == v3) {
1001            break;
1002        }
1003        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
1004    }
1005    tcg_temp_free_i64(tmp);
1006    return DISAS_NEXT;
1007}
1008
1009static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
1010{
1011    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
1012    TCGv_ptr a0 = tcg_temp_new_ptr();
1013
1014    /* convert highest index into an actual length */
1015    tcg_gen_addi_i64(o->in2, o->in2, 1);
1016    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
1017    gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
1018    tcg_temp_free_ptr(a0);
1019    return DISAS_NEXT;
1020}
1021
/*
 * Vector unpack: widen elements of v2 (size m3) into v1 at double the
 * element size. op2 0xd4/0xd5 are the logical (zero-extending) variants;
 * the others sign-extend. 0xd7/0xd5 read the leftmost source elements
 * (src_idx == dst_idx), the remaining opcodes the rightmost half
 * (presumably VUPH/VUPLH vs. VUPL/VUPLL -- confirm against the insn table).
 */
static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
{
    const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t src_es = get_field(s, m3);
    const uint8_t dst_es = src_es + 1;
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (src_es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }

    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
            src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
1059
1060static DisasJumpType op_va(DisasContext *s, DisasOps *o)
1061{
1062    const uint8_t es = get_field(s, m4);
1063
1064    if (es > ES_128) {
1065        gen_program_exception(s, PGM_SPECIFICATION);
1066        return DISAS_NORETURN;
1067    } else if (es == ES_128) {
1068        gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1),
1069                          get_field(s, v2), get_field(s, v3));
1070        return DISAS_NEXT;
1071    }
1072    gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2),
1073                  get_field(s, v3));
1074    return DISAS_NEXT;
1075}
1076
1077static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
1078{
1079    const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
1080    TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
1081    TCGv_i64 t1 = tcg_temp_new_i64();
1082    TCGv_i64 t2 = tcg_temp_new_i64();
1083    TCGv_i64 t3 = tcg_temp_new_i64();
1084
1085    /* Calculate the carry into the MSB, ignoring the old MSBs */
1086    tcg_gen_andc_i64(t1, a, msb_mask);
1087    tcg_gen_andc_i64(t2, b, msb_mask);
1088    tcg_gen_add_i64(t1, t1, t2);
1089    /* Calculate the MSB without any carry into it */
1090    tcg_gen_xor_i64(t3, a, b);
1091    /* Calculate the carry out of the MSB in the MSB bit position */
1092    tcg_gen_and_i64(d, a, b);
1093    tcg_gen_and_i64(t1, t1, t3);
1094    tcg_gen_or_i64(d, d, t1);
1095    /* Isolate and shift the carry into position */
1096    tcg_gen_and_i64(d, d, msb_mask);
1097    tcg_gen_shri_i64(d, d, msb_bit_nr);
1098
1099    tcg_temp_free_i64(t1);
1100    tcg_temp_free_i64(t2);
1101    tcg_temp_free_i64(t3);
1102}
1103
/* gvec .fni8 wrapper: per-byte carry-out of the packed addition. */
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_8);
}
1108
/* gvec .fni8 wrapper: per-halfword carry-out of the packed addition. */
static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_16);
}
1113
1114static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1115{
1116    TCGv_i32 t = tcg_temp_new_i32();
1117
1118    tcg_gen_add_i32(t, a, b);
1119    tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
1120    tcg_temp_free_i32(t);
1121}
1122
1123static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1124{
1125    TCGv_i64 t = tcg_temp_new_i64();
1126
1127    tcg_gen_add_i64(t, a, b);
1128    tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
1129    tcg_temp_free_i64(t);
1130}
1131
/*
 * 128-bit variant: dh:dl = carry-out (0 or 1) of the 128-bit addition
 * ah:al + bh:bl.
 */
static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                         TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    /* add the low halves; th accumulates the carry */
    tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    /* the carry-out of the high-half addition lands in dl */
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}
1148
1149static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
1150{
1151    const uint8_t es = get_field(s, m4);
1152    static const GVecGen3 g[4] = {
1153        { .fni8 = gen_acc8_i64, },
1154        { .fni8 = gen_acc16_i64, },
1155        { .fni4 = gen_acc_i32, },
1156        { .fni8 = gen_acc_i64, },
1157    };
1158
1159    if (es > ES_128) {
1160        gen_program_exception(s, PGM_SPECIFICATION);
1161        return DISAS_NORETURN;
1162    } else if (es == ES_128) {
1163        gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1),
1164                          get_field(s, v2), get_field(s, v3));
1165        return DISAS_NEXT;
1166    }
1167    gen_gvec_3(get_field(s, v1), get_field(s, v2),
1168               get_field(s, v3), &g[es]);
1169    return DISAS_NEXT;
1170}
1171
/*
 * 128-bit add with carry: dh:dl = ah:al + bh:bl + (cl & 1).
 * Only bit 0 of the carry input cl participates; ch is ignored.
 */
static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                        TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_const_i64(0);

    /* extract the carry only */
    tcg_gen_extract_i64(tl, cl, 0, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}
1186
1187static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
1188{
1189    if (get_field(s, m5) != ES_128) {
1190        gen_program_exception(s, PGM_SPECIFICATION);
1191        return DISAS_NORETURN;
1192    }
1193
1194    gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1),
1195                      get_field(s, v2), get_field(s, v3),
1196                      get_field(s, v4));
1197    return DISAS_NEXT;
1198}
1199
/*
 * 128-bit carry-out of add-with-carry: dh:dl = carry-out (0 or 1) of
 * ah:al + bh:bl + (cl & 1).
 */
static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    /* fold carry-in and both low halves; th accumulates all carries */
    tcg_gen_andi_i64(tl, cl, 1);
    tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
    tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    /* the carry-out of the high-half addition lands in dl */
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
    tcg_temp_free_i64(zero);
}
1218
1219static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
1220{
1221    if (get_field(s, m5) != ES_128) {
1222        gen_program_exception(s, PGM_SPECIFICATION);
1223        return DISAS_NORETURN;
1224    }
1225
1226    gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1),
1227                      get_field(s, v2), get_field(s, v3),
1228                      get_field(s, v4));
1229    return DISAS_NEXT;
1230}
1231
1232static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
1233{
1234    gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2),
1235                  get_field(s, v3));
1236    return DISAS_NEXT;
1237}
1238
1239static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
1240{
1241    gen_gvec_fn_3(andc, ES_8, get_field(s, v1),
1242                  get_field(s, v2), get_field(s, v3));
1243    return DISAS_NEXT;
1244}
1245
1246static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1247{
1248    TCGv_i64 t0 = tcg_temp_new_i64();
1249    TCGv_i64 t1 = tcg_temp_new_i64();
1250
1251    tcg_gen_ext_i32_i64(t0, a);
1252    tcg_gen_ext_i32_i64(t1, b);
1253    tcg_gen_add_i64(t0, t0, t1);
1254    tcg_gen_addi_i64(t0, t0, 1);
1255    tcg_gen_shri_i64(t0, t0, 1);
1256    tcg_gen_extrl_i64_i32(d, t0);
1257
1258    tcg_temp_free(t0);
1259    tcg_temp_free(t1);
1260}
1261
/*
 * dl = (al + bl + 1) >> 1: rounded signed average, using a 65-bit
 * intermediate. Only one extra bit of precision is needed, so the high
 * halves just replicate bit 63 of each addend.
 */
static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();

    /* extending the sign by one bit is sufficient */
    tcg_gen_extract_i64(ah, al, 63, 1);
    tcg_gen_extract_i64(bh, bl, 63, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    /* shift the 65-bit sum right by one: take bits 64..1 of dh:dl */
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(bh);
}
1279
1280static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
1281{
1282    const uint8_t es = get_field(s, m4);
1283    static const GVecGen3 g[4] = {
1284        { .fno = gen_helper_gvec_vavg8, },
1285        { .fno = gen_helper_gvec_vavg16, },
1286        { .fni4 = gen_avg_i32, },
1287        { .fni8 = gen_avg_i64, },
1288    };
1289
1290    if (es > ES_64) {
1291        gen_program_exception(s, PGM_SPECIFICATION);
1292        return DISAS_NORETURN;
1293    }
1294    gen_gvec_3(get_field(s, v1), get_field(s, v2),
1295               get_field(s, v3), &g[es]);
1296    return DISAS_NEXT;
1297}
1298
1299static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1300{
1301    TCGv_i64 t0 = tcg_temp_new_i64();
1302    TCGv_i64 t1 = tcg_temp_new_i64();
1303
1304    tcg_gen_extu_i32_i64(t0, a);
1305    tcg_gen_extu_i32_i64(t1, b);
1306    tcg_gen_add_i64(t0, t0, t1);
1307    tcg_gen_addi_i64(t0, t0, 1);
1308    tcg_gen_shri_i64(t0, t0, 1);
1309    tcg_gen_extrl_i64_i32(d, t0);
1310
1311    tcg_temp_free(t0);
1312    tcg_temp_free(t1);
1313}
1314
/*
 * dl = (al + bl + 1) >> 1: rounded unsigned average via a 65-bit
 * intermediate; the high halves are simply zero for unsigned operands.
 */
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    /* shift the 65-bit sum right by one: take bits 64..1 of dh:dl */
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(zero);
}
1327
1328static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
1329{
1330    const uint8_t es = get_field(s, m4);
1331    static const GVecGen3 g[4] = {
1332        { .fno = gen_helper_gvec_vavgl8, },
1333        { .fno = gen_helper_gvec_vavgl16, },
1334        { .fni4 = gen_avgl_i32, },
1335        { .fni8 = gen_avgl_i64, },
1336    };
1337
1338    if (es > ES_64) {
1339        gen_program_exception(s, PGM_SPECIFICATION);
1340        return DISAS_NORETURN;
1341    }
1342    gen_gvec_3(get_field(s, v1), get_field(s, v2),
1343               get_field(s, v3), &g[es]);
1344    return DISAS_NEXT;
1345}
1346
/*
 * Vector checksum: 32-bit end-around-carry sum of the four words of v2,
 * seeded with word 1 of v3. The result goes into word 1 of v1; the rest
 * of v1 is cleared.
 */
static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 sum = tcg_temp_new_i32();
    int i;

    read_vec_element_i32(sum, get_field(s, v3), 1, ES_32);
    for (i = 0; i < 4; i++) {
        read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
        /* sum = sum + tmp + carry-out(sum + tmp): end-around carry add */
        tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
    }
    gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
    write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(sum);
    return DISAS_NEXT;
}
1365
/*
 * Vector element compare setup: load one element each of v1 and v2 into
 * o->in1/o->in2 for a scalar comparison. The element used is the last one
 * of the leftmost doubleword (NUM_VEC_ELEMENTS(es) / 2 - 1). op2 0xdb is
 * the signed variant (MO_SIGN); the other opcode is logical.
 */
static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (s->fields.op2 == 0xdb) {
        es |= MO_SIGN;
    }

    /* in1/in2 are consumed (and freed) by the common compare code */
    o->in1 = tcg_temp_new_i64();
    o->in2 = tcg_temp_new_i64();
    read_vec_element_i64(o->in1, get_field(s, v1), enr, es);
    read_vec_element_i64(o->in2, get_field(s, v2), enr, es);
    return DISAS_NEXT;
}
1385
/*
 * Vector compare: per-element compare of v2 with v3 into v1 (elements
 * become all-ones on true, zero on false). The TCG condition comes from
 * the instruction table. If bit 0 of m5 is set, the CC is derived from
 * the full result mask via CC_OP_VC.
 */
static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGCond cond = s->insn->data;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_cmp(cond, es,
                     vec_full_reg_offset(get_field(s, v1)),
                     vec_full_reg_offset(get_field(s, v2)),
                     vec_full_reg_offset(get_field(s, v3)), 16, 16);
    if (get_field(s, m5) & 0x1) {
        /* compute the CC from both halves of the result vector */
        TCGv_i64 low = tcg_temp_new_i64();
        TCGv_i64 high = tcg_temp_new_i64();

        read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
        read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
        gen_op_update2_cc_i64(s, CC_OP_VC, low, high);

        tcg_temp_free_i64(low);
        tcg_temp_free_i64(high);
    }
    return DISAS_NEXT;
}
1413
/* 32-bit count-leading-zeros; an all-zero input yields 32. */
static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_clzi_i32(d, a, 32);
}
1418
/* 64-bit count-leading-zeros; an all-zero input yields 64. */
static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_clzi_i64(d, a, 64);
}
1423
1424static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
1425{
1426    const uint8_t es = get_field(s, m3);
1427    static const GVecGen2 g[4] = {
1428        { .fno = gen_helper_gvec_vclz8, },
1429        { .fno = gen_helper_gvec_vclz16, },
1430        { .fni4 = gen_clz_i32, },
1431        { .fni8 = gen_clz_i64, },
1432    };
1433
1434    if (es > ES_64) {
1435        gen_program_exception(s, PGM_SPECIFICATION);
1436        return DISAS_NORETURN;
1437    }
1438    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
1439    return DISAS_NEXT;
1440}
1441
/* 32-bit count-trailing-zeros; an all-zero input yields 32. */
static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_ctzi_i32(d, a, 32);
}
1446
/* 64-bit count-trailing-zeros; an all-zero input yields 64. */
static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_ctzi_i64(d, a, 64);
}
1451
1452static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
1453{
1454    const uint8_t es = get_field(s, m3);
1455    static const GVecGen2 g[4] = {
1456        { .fno = gen_helper_gvec_vctz8, },
1457        { .fno = gen_helper_gvec_vctz16, },
1458        { .fni4 = gen_ctz_i32, },
1459        { .fni8 = gen_ctz_i64, },
1460    };
1461
1462    if (es > ES_64) {
1463        gen_program_exception(s, PGM_SPECIFICATION);
1464        return DISAS_NORETURN;
1465    }
1466    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
1467    return DISAS_NEXT;
1468}
1469
1470static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
1471{
1472    gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2),
1473                 get_field(s, v3));
1474    return DISAS_NEXT;
1475}
1476
1477static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
1478{
1479    const uint8_t es = get_field(s, m4);
1480    static const GVecGen3 g[4] = {
1481        { .fno = gen_helper_gvec_vgfm8, },
1482        { .fno = gen_helper_gvec_vgfm16, },
1483        { .fno = gen_helper_gvec_vgfm32, },
1484        { .fno = gen_helper_gvec_vgfm64, },
1485    };
1486
1487    if (es > ES_64) {
1488        gen_program_exception(s, PGM_SPECIFICATION);
1489        return DISAS_NORETURN;
1490    }
1491    gen_gvec_3(get_field(s, v1), get_field(s, v2),
1492               get_field(s, v3), &g[es]);
1493    return DISAS_NEXT;
1494}
1495
1496static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
1497{
1498    const uint8_t es = get_field(s, m5);
1499    static const GVecGen4 g[4] = {
1500        { .fno = gen_helper_gvec_vgfma8, },
1501        { .fno = gen_helper_gvec_vgfma16, },
1502        { .fno = gen_helper_gvec_vgfma32, },
1503        { .fno = gen_helper_gvec_vgfma64, },
1504    };
1505
1506    if (es > ES_64) {
1507        gen_program_exception(s, PGM_SPECIFICATION);
1508        return DISAS_NORETURN;
1509    }
1510    gen_gvec_4(get_field(s, v1), get_field(s, v2),
1511               get_field(s, v3), get_field(s, v4), &g[es]);
1512    return DISAS_NEXT;
1513}
1514
1515static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
1516{
1517    const uint8_t es = get_field(s, m3);
1518
1519    if (es > ES_64) {
1520        gen_program_exception(s, PGM_SPECIFICATION);
1521        return DISAS_NORETURN;
1522    }
1523
1524    gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2));
1525    return DISAS_NEXT;
1526}
1527
1528static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
1529{
1530    const uint8_t es = get_field(s, m3);
1531
1532    if (es > ES_64) {
1533        gen_program_exception(s, PGM_SPECIFICATION);
1534        return DISAS_NORETURN;
1535    }
1536
1537    gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2));
1538    return DISAS_NEXT;
1539}
1540
1541static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
1542{
1543    const uint8_t v1 = get_field(s, v1);
1544    const uint8_t v2 = get_field(s, v2);
1545    const uint8_t v3 = get_field(s, v3);
1546    const uint8_t es = get_field(s, m4);
1547
1548    if (es > ES_64) {
1549        gen_program_exception(s, PGM_SPECIFICATION);
1550        return DISAS_NORETURN;
1551    }
1552
1553    switch (s->fields.op2) {
1554    case 0xff:
1555        gen_gvec_fn_3(smax, es, v1, v2, v3);
1556        break;
1557    case 0xfd:
1558        gen_gvec_fn_3(umax, es, v1, v2, v3);
1559        break;
1560    case 0xfe:
1561        gen_gvec_fn_3(smin, es, v1, v2, v3);
1562        break;
1563    case 0xfc:
1564        gen_gvec_fn_3(umin, es, v1, v2, v3);
1565        break;
1566    default:
1567        g_assert_not_reached();
1568    }
1569    return DISAS_NEXT;
1570}
1571
1572static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1573{
1574    TCGv_i32 t0 = tcg_temp_new_i32();
1575
1576    tcg_gen_mul_i32(t0, a, b);
1577    tcg_gen_add_i32(d, t0, c);
1578
1579    tcg_temp_free_i32(t0);
1580}
1581
1582static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1583{
1584    TCGv_i64 t0 = tcg_temp_new_i64();
1585    TCGv_i64 t1 = tcg_temp_new_i64();
1586    TCGv_i64 t2 = tcg_temp_new_i64();
1587
1588    tcg_gen_ext_i32_i64(t0, a);
1589    tcg_gen_ext_i32_i64(t1, b);
1590    tcg_gen_ext_i32_i64(t2, c);
1591    tcg_gen_mul_i64(t0, t0, t1);
1592    tcg_gen_add_i64(t0, t0, t2);
1593    tcg_gen_extrh_i64_i32(d, t0);
1594
1595    tcg_temp_free(t0);
1596    tcg_temp_free(t1);
1597    tcg_temp_free(t2);
1598}
1599
1600static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
1601{
1602    TCGv_i64 t0 = tcg_temp_new_i64();
1603    TCGv_i64 t1 = tcg_temp_new_i64();
1604    TCGv_i64 t2 = tcg_temp_new_i64();
1605
1606    tcg_gen_extu_i32_i64(t0, a);
1607    tcg_gen_extu_i32_i64(t1, b);
1608    tcg_gen_extu_i32_i64(t2, c);
1609    tcg_gen_mul_i64(t0, t0, t1);
1610    tcg_gen_add_i64(t0, t0, t2);
1611    tcg_gen_extrh_i64_i32(d, t0);
1612
1613    tcg_temp_free(t0);
1614    tcg_temp_free(t1);
1615    tcg_temp_free(t2);
1616}
1617
/*
 * Vector multiply-and-add family: op2 selects the variant (low/high,
 * logical, even/odd); es (m5) up to ES_32 only, as the widening variants
 * produce double-width elements.
 */
static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    static const GVecGen4 g_vmal[3] = {
        { .fno = gen_helper_gvec_vmal8, },
        { .fno = gen_helper_gvec_vmal16, },
        { .fni4 = gen_mal_i32, },
    };
    static const GVecGen4 g_vmah[3] = {
        { .fno = gen_helper_gvec_vmah8, },
        { .fno = gen_helper_gvec_vmah16, },
        { .fni4 = gen_mah_i32, },
    };
    static const GVecGen4 g_vmalh[3] = {
        { .fno = gen_helper_gvec_vmalh8, },
        { .fno = gen_helper_gvec_vmalh16, },
        { .fni4 = gen_malh_i32, },
    };
    static const GVecGen4 g_vmae[3] = {
        { .fno = gen_helper_gvec_vmae8, },
        { .fno = gen_helper_gvec_vmae16, },
        { .fno = gen_helper_gvec_vmae32, },
    };
    static const GVecGen4 g_vmale[3] = {
        { .fno = gen_helper_gvec_vmale8, },
        { .fno = gen_helper_gvec_vmale16, },
        { .fno = gen_helper_gvec_vmale32, },
    };
    static const GVecGen4 g_vmao[3] = {
        { .fno = gen_helper_gvec_vmao8, },
        { .fno = gen_helper_gvec_vmao16, },
        { .fno = gen_helper_gvec_vmao32, },
    };
    static const GVecGen4 g_vmalo[3] = {
        { .fno = gen_helper_gvec_vmalo8, },
        { .fno = gen_helper_gvec_vmalo16, },
        { .fno = gen_helper_gvec_vmalo32, },
    };
    const GVecGen4 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* dispatch on the opcode; mnemonics per helper-name suffix */
    switch (s->fields.op2) {
    case 0xaa:          /* VMAL */
        fn = &g_vmal[es];
        break;
    case 0xab:          /* VMAH */
        fn = &g_vmah[es];
        break;
    case 0xa9:          /* VMALH */
        fn = &g_vmalh[es];
        break;
    case 0xae:          /* VMAE */
        fn = &g_vmae[es];
        break;
    case 0xac:          /* VMALE */
        fn = &g_vmale[es];
        break;
    case 0xaf:          /* VMAO */
        fn = &g_vmao[es];
        break;
    case 0xad:          /* VMALO */
        fn = &g_vmalo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_4(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), get_field(s, v4), fn);
    return DISAS_NEXT;
}
1693
1694static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1695{
1696    TCGv_i32 t = tcg_temp_new_i32();
1697
1698    tcg_gen_muls2_i32(t, d, a, b);
1699    tcg_temp_free_i32(t);
1700}
1701
1702static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
1703{
1704    TCGv_i32 t = tcg_temp_new_i32();
1705
1706    tcg_gen_mulu2_i32(t, d, a, b);
1707    tcg_temp_free_i32(t);
1708}
1709
/*
 * Vector multiply family: op2 selects the variant (low/high, logical,
 * even/odd); es (m4) up to ES_32 only. Multiply-low maps straight to the
 * gvec mul expansion.
 */
static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g_vmh[3] = {
        { .fno = gen_helper_gvec_vmh8, },
        { .fno = gen_helper_gvec_vmh16, },
        { .fni4 = gen_mh_i32, },
    };
    static const GVecGen3 g_vmlh[3] = {
        { .fno = gen_helper_gvec_vmlh8, },
        { .fno = gen_helper_gvec_vmlh16, },
        { .fni4 = gen_mlh_i32, },
    };
    static const GVecGen3 g_vme[3] = {
        { .fno = gen_helper_gvec_vme8, },
        { .fno = gen_helper_gvec_vme16, },
        { .fno = gen_helper_gvec_vme32, },
    };
    static const GVecGen3 g_vmle[3] = {
        { .fno = gen_helper_gvec_vmle8, },
        { .fno = gen_helper_gvec_vmle16, },
        { .fno = gen_helper_gvec_vmle32, },
    };
    static const GVecGen3 g_vmo[3] = {
        { .fno = gen_helper_gvec_vmo8, },
        { .fno = gen_helper_gvec_vmo16, },
        { .fno = gen_helper_gvec_vmo32, },
    };
    static const GVecGen3 g_vmlo[3] = {
        { .fno = gen_helper_gvec_vmlo8, },
        { .fno = gen_helper_gvec_vmlo16, },
        { .fno = gen_helper_gvec_vmlo32, },
    };
    const GVecGen3 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* dispatch on the opcode; mnemonics per helper-name suffix */
    switch (s->fields.op2) {
    case 0xa2:          /* multiply low: plain gvec multiplication */
        gen_gvec_fn_3(mul, es, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    case 0xa3:          /* VMH */
        fn = &g_vmh[es];
        break;
    case 0xa1:          /* VMLH */
        fn = &g_vmlh[es];
        break;
    case 0xa6:          /* VME */
        fn = &g_vme[es];
        break;
    case 0xa4:          /* VMLE */
        fn = &g_vmle[es];
        break;
    case 0xa7:          /* VMO */
        fn = &g_vmo[es];
        break;
    case 0xa5:          /* VMLO */
        fn = &g_vmlo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), fn);
    return DISAS_NEXT;
}
1781
/*
 * Vector multiply sum logical (doubleword elements only): the even and
 * odd element products of v2 * v3 -- each optionally doubled via an m6
 * bit -- are summed together with v4 into a 128-bit result in v1.
 */
static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 l1, h1, l2, h2;

    if (get_field(s, m5) != ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    l1 = tcg_temp_new_i64();
    h1 = tcg_temp_new_i64();
    l2 = tcg_temp_new_i64();
    h2 = tcg_temp_new_i64();

    /* Multiply both even elements from v2 and v3 */
    read_vec_element_i64(l1, get_field(s, v2), 0, ES_64);
    read_vec_element_i64(h1, get_field(s, v3), 0, ES_64);
    tcg_gen_mulu2_i64(l1, h1, l1, h1);
    /* Shift result left by one (x2) if requested (m6 bit 3) */
    if (extract32(get_field(s, m6), 3, 1)) {
        tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1);
    }

    /* Multiply both odd elements from v2 and v3 */
    read_vec_element_i64(l2, get_field(s, v2), 1, ES_64);
    read_vec_element_i64(h2, get_field(s, v3), 1, ES_64);
    tcg_gen_mulu2_i64(l2, h2, l2, h2);
    /* Shift result left by one (x2) if requested (m6 bit 2) */
    if (extract32(get_field(s, m6), 2, 1)) {
        tcg_gen_add2_i64(l2, h2, l2, h2, l2, h2);
    }

    /* Add both intermediate results */
    tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);
    /* Add whole v4 */
    read_vec_element_i64(h2, get_field(s, v4), 0, ES_64);
    read_vec_element_i64(l2, get_field(s, v4), 1, ES_64);
    tcg_gen_add2_i64(l1, h1, l1, h1, l2, h2);

    /* Store final result into v1. */
    write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(l1);
    tcg_temp_free_i64(h1);
    tcg_temp_free_i64(l2);
    tcg_temp_free_i64(h2);
    return DISAS_NEXT;
}
1831
1832static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
1833{
1834    gen_gvec_fn_3(nand, ES_8, get_field(s, v1),
1835                  get_field(s, v2), get_field(s, v3));
1836    return DISAS_NEXT;
1837}
1838
1839static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
1840{
1841    gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2),
1842                  get_field(s, v3));
1843    return DISAS_NEXT;
1844}
1845
1846static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
1847{
1848    gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2),
1849                  get_field(s, v3));
1850    return DISAS_NEXT;
1851}
1852
1853static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
1854{
1855    gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2),
1856                  get_field(s, v3));
1857    return DISAS_NEXT;
1858}
1859
1860static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
1861{
1862    gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2),
1863                  get_field(s, v3));
1864    return DISAS_NEXT;
1865}
1866
/*
 * Vector population count. Without the vector-enhancements facility only
 * byte elements are available; larger element sizes require the feature.
 */
static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vpopct8, },
        { .fno = gen_helper_gvec_vpopct16, },
        { .fni4 = tcg_gen_ctpop_i32, },
        { .fni8 = tcg_gen_ctpop_i64, },
    };

    if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}
1885
/*
 * Rotate-then-insert-under-mask, 32-bit element: rotate a left by c and
 * replace in d exactly the bits selected by mask b. d arrives pre-loaded
 * with the old destination element (.load_dest in op_verim).
 */
static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_rotli_i32(t, a, c & 31);
    tcg_gen_and_i32(t, t, b);
    /* clear the masked bits of the old value, then merge the rotated ones */
    tcg_gen_andc_i32(d, d, b);
    tcg_gen_or_i32(d, d, t);

    tcg_temp_free_i32(t);
}
1897
/*
 * Rotate-then-insert-under-mask, 64-bit element: rotate a left by c and
 * replace in d exactly the bits selected by mask b. d arrives pre-loaded
 * with the old destination element (.load_dest in op_verim).
 */
static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_rotli_i64(t, a, c & 63);
    tcg_gen_and_i64(t, t, b);
    /* clear the masked bits of the old value, then merge the rotated ones */
    tcg_gen_andc_i64(d, d, b);
    tcg_gen_or_i64(d, d, t);

    tcg_temp_free_i64(t);
}
1909
/*
 * Vector element rotate and insert under mask: rotate each v2 element by
 * i4 (reduced modulo the element width) and insert it into v1 under the
 * per-bit mask in v3. 8/16-bit elements go out-of-line; 32/64-bit inline
 * with .load_dest so the old v1 element is available for merging.
 */
static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t i4 = get_field(s, i4) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    static const GVecGen3i g[4] = {
        { .fno = gen_helper_gvec_verim8, },
        { .fno = gen_helper_gvec_verim16, },
        { .fni4 = gen_rim_i32,
          .load_dest = true, },
        { .fni8 = gen_rim_i64,
          .load_dest = true, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3i(get_field(s, v1), get_field(s, v2),
                get_field(s, v3), i4, &g[es]);
    return DISAS_NEXT;
}
1933
1934static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
1935{
1936    const uint8_t es = get_field(s, m4);
1937    const uint8_t v1 = get_field(s, v1);
1938    const uint8_t v2 = get_field(s, v2);
1939    const uint8_t v3 = get_field(s, v3);
1940
1941    if (es > ES_64) {
1942        gen_program_exception(s, PGM_SPECIFICATION);
1943        return DISAS_NORETURN;
1944    }
1945
1946    switch (s->fields.op2) {
1947    case 0x70:
1948        gen_gvec_fn_3(shlv, es, v1, v2, v3);
1949        break;
1950    case 0x73:
1951        gen_gvec_fn_3(rotlv, es, v1, v2, v3);
1952        break;
1953    case 0x7a:
1954        gen_gvec_fn_3(sarv, es, v1, v2, v3);
1955        break;
1956    case 0x78:
1957        gen_gvec_fn_3(shrv, es, v1, v2, v3);
1958        break;
1959    default:
1960        g_assert_not_reached();
1961    }
1962    return DISAS_NEXT;
1963}
1964
/*
 * Vector element shift/rotate by an immediate or base+displacement amount.
 * With b2 == 0 the shift count is the constant d2; otherwise it is taken
 * from the computed address (o->addr1), masked to the element width.
 * The concrete operation is selected by the opcode's second byte.
 */
static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    /* only the displacement bits relevant for the element size are used */
    const uint8_t d2 = get_field(s, d2) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v3 = get_field(s, v3);
    TCGv_i32 shift;

    /* element sizes above doubleword are invalid */
    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (likely(!get_field(s, b2))) {
        /* no base register: the count is a compile-time constant */
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2i(shli, es, v1, v3, d2);
            break;
        case 0x33:
            gen_gvec_fn_2i(rotli, es, v1, v3, d2);
            break;
        case 0x3a:
            gen_gvec_fn_2i(sari, es, v1, v3, d2);
            break;
        case 0x38:
            gen_gvec_fn_2i(shri, es, v1, v3, d2);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* runtime count: truncate the address and mask to element width */
        shift = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(shift, o->addr1);
        tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2s(shls, es, v1, v3, shift);
            break;
        case 0x33:
            gen_gvec_fn_2s(rotls, es, v1, v3, shift);
            break;
        case 0x3a:
            gen_gvec_fn_2s(sars, es, v1, v3, shift);
            break;
        case 0x38:
            gen_gvec_fn_2s(shrs, es, v1, v3, shift);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_i32(shift);
    }
    return DISAS_NEXT;
}
2020
2021static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
2022{
2023    TCGv_i64 shift = tcg_temp_new_i64();
2024
2025    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
2026    if (s->fields.op2 == 0x74) {
2027        tcg_gen_andi_i64(shift, shift, 0x7);
2028    } else {
2029        tcg_gen_andi_i64(shift, shift, 0x78);
2030    }
2031
2032    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2033                    shift, 0, gen_helper_gvec_vsl);
2034    tcg_temp_free_i64(shift);
2035    return DISAS_NEXT;
2036}
2037
2038static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
2039{
2040    const uint8_t i4 = get_field(s, i4) & 0xf;
2041    const int left_shift = (i4 & 7) * 8;
2042    const int right_shift = 64 - left_shift;
2043    TCGv_i64 t0 = tcg_temp_new_i64();
2044    TCGv_i64 t1 = tcg_temp_new_i64();
2045    TCGv_i64 t2 = tcg_temp_new_i64();
2046
2047    if ((i4 & 8) == 0) {
2048        read_vec_element_i64(t0, get_field(s, v2), 0, ES_64);
2049        read_vec_element_i64(t1, get_field(s, v2), 1, ES_64);
2050        read_vec_element_i64(t2, get_field(s, v3), 0, ES_64);
2051    } else {
2052        read_vec_element_i64(t0, get_field(s, v2), 1, ES_64);
2053        read_vec_element_i64(t1, get_field(s, v3), 0, ES_64);
2054        read_vec_element_i64(t2, get_field(s, v3), 1, ES_64);
2055    }
2056    tcg_gen_extract2_i64(t0, t1, t0, right_shift);
2057    tcg_gen_extract2_i64(t1, t2, t1, right_shift);
2058    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
2059    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
2060
2061    tcg_temp_free(t0);
2062    tcg_temp_free(t1);
2063    tcg_temp_free(t2);
2064    return DISAS_NEXT;
2065}
2066
2067static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
2068{
2069    TCGv_i64 shift = tcg_temp_new_i64();
2070
2071    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
2072    if (s->fields.op2 == 0x7e) {
2073        tcg_gen_andi_i64(shift, shift, 0x7);
2074    } else {
2075        tcg_gen_andi_i64(shift, shift, 0x78);
2076    }
2077
2078    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2079                    shift, 0, gen_helper_gvec_vsra);
2080    tcg_temp_free_i64(shift);
2081    return DISAS_NEXT;
2082}
2083
2084static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
2085{
2086    TCGv_i64 shift = tcg_temp_new_i64();
2087
2088    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
2089    if (s->fields.op2 == 0x7c) {
2090        tcg_gen_andi_i64(shift, shift, 0x7);
2091    } else {
2092        tcg_gen_andi_i64(shift, shift, 0x78);
2093    }
2094
2095    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
2096                    shift, 0, gen_helper_gvec_vsrl);
2097    tcg_temp_free_i64(shift);
2098    return DISAS_NEXT;
2099}
2100
2101static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
2102{
2103    const uint8_t es = get_field(s, m4);
2104
2105    if (es > ES_128) {
2106        gen_program_exception(s, PGM_SPECIFICATION);
2107        return DISAS_NORETURN;
2108    } else if (es == ES_128) {
2109        gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1),
2110                          get_field(s, v2), get_field(s, v3));
2111        return DISAS_NEXT;
2112    }
2113    gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2),
2114                  get_field(s, v3));
2115    return DISAS_NEXT;
2116}
2117
/* Subtract-compute-borrow, 32 bit: d = 1 if a - b does not borrow (a >= b). */
static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b);
}
2122
/* Subtract-compute-borrow, 64 bit: d = 1 if a - b does not borrow (a >= b). */
static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b);
}
2127
/*
 * Subtract-compute-borrow for a 128-bit element split into hi/lo i64
 * halves: dh:dl = 1 if a - b does not borrow, else 0.  The borrow is
 * propagated through the high half via chained sub2 operations; after
 * the chain, th is -1 on borrow and 0 otherwise.
 */
static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                          TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    /* low halves: th becomes -1 (all ones) when al - bl borrows */
    tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
    /* reduce the borrow indication to a single bit */
    tcg_gen_andi_i64(th, th, 1);
    /* high halves: subtract the incoming borrow, then bh */
    tcg_gen_sub2_i64(tl, th, ah, zero, th, zero);
    tcg_gen_sub2_i64(tl, th, tl, th, bh, zero);
    /* "invert" the result: -1 -> 0; 0 -> 1 */
    tcg_gen_addi_i64(dl, th, 1);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}
2147
/*
 * VECTOR SUBTRACT COMPUTE BORROW INDICATION.  8/16-bit elements go
 * through ool helpers, 32/64-bit are expanded inline, and the 128-bit
 * case uses the dedicated two-i64 expansion.
 */
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vscbi8, },
        { .fno = gen_helper_gvec_vscbi16, },
        { .fni4 = gen_scbi_i32, },
        { .fni8 = gen_scbi_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}
2170
2171static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
2172                         TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
2173{
2174    TCGv_i64 tl = tcg_temp_new_i64();
2175    TCGv_i64 th = tcg_temp_new_i64();
2176
2177    tcg_gen_not_i64(tl, bl);
2178    tcg_gen_not_i64(th, bh);
2179    gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
2180    tcg_temp_free_i64(tl);
2181    tcg_temp_free_i64(th);
2182}
2183
2184static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
2185{
2186    if (get_field(s, m5) != ES_128) {
2187        gen_program_exception(s, PGM_SPECIFICATION);
2188        return DISAS_NORETURN;
2189    }
2190
2191    gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1),
2192                      get_field(s, v2), get_field(s, v3),
2193                      get_field(s, v4));
2194    return DISAS_NEXT;
2195}
2196
2197static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
2198                           TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
2199{
2200    TCGv_i64 th = tcg_temp_new_i64();
2201    TCGv_i64 tl = tcg_temp_new_i64();
2202
2203    tcg_gen_not_i64(tl, bl);
2204    tcg_gen_not_i64(th, bh);
2205    gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
2206
2207    tcg_temp_free_i64(tl);
2208    tcg_temp_free_i64(th);
2209}
2210
2211static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
2212{
2213    if (get_field(s, m5) != ES_128) {
2214        gen_program_exception(s, PGM_SPECIFICATION);
2215        return DISAS_NORETURN;
2216    }
2217
2218    gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1),
2219                      get_field(s, v2), get_field(s, v3),
2220                      get_field(s, v4));
2221    return DISAS_NEXT;
2222}
2223
/*
 * VECTOR SUM ACROSS DOUBLEWORD.  Each half of v2's elements is summed
 * into one doubleword of v1; the sum is seeded with the last element of
 * the corresponding half of v3.  Only 16/32-bit elements are valid.
 */
static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i64 sum, tmp;
    uint8_t dst_idx;

    if (es == ES_8 || es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();
    /* one destination doubleword per half of the source vector */
    for (dst_idx = 0; dst_idx < 2; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1;

        /* seed the accumulator with the last v3 element of this half */
        read_vec_element_i64(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i64(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i64(sum, sum, tmp);
        }
        write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
    }
    tcg_temp_free_i64(sum);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
2252
/*
 * VECTOR SUM ACROSS QUADWORD.  All elements of v2 plus the last element
 * of v3 are accumulated into a single 128-bit sum (sumh:suml) stored in
 * v1.  Only 32/64-bit elements are valid.
 */
static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1;
    TCGv_i64 sumh, suml, zero, tmpl;
    uint8_t idx;

    if (es < ES_32 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sumh = tcg_const_i64(0);
    suml = tcg_temp_new_i64();
    zero = tcg_const_i64(0);
    tmpl = tcg_temp_new_i64();

    /* seed the accumulator with the last element of v3 */
    read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
    for (idx = 0; idx <= max_idx; idx++) {
        read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
        /* 128-bit accumulation: carries propagate into sumh */
        tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero);
    }
    write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(sumh);
    tcg_temp_free_i64(suml);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(tmpl);
    return DISAS_NEXT;
}
2284
/*
 * VECTOR SUM ACROSS WORD.  Each quarter of v2's elements is summed into
 * one word of v1; the sum is seeded with the last element of the
 * corresponding quarter of v3.  Only 8/16-bit elements are valid.
 */
static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i32 sum, tmp;
    uint8_t dst_idx;

    if (es > ES_16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();
    /* one destination word per quarter of the source vector */
    for (dst_idx = 0; dst_idx < 4; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1;

        /* seed the accumulator with the last v3 element of this quarter */
        read_vec_element_i32(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i32(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i32(sum, sum, tmp);
        }
        write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
    }
    tcg_temp_free_i32(sum);
    tcg_temp_free_i32(tmp);
    return DISAS_NEXT;
}
2313
2314static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
2315{
2316    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
2317                   cpu_env, 0, gen_helper_gvec_vtm);
2318    set_cc_static(s);
2319    return DISAS_NEXT;
2320}
2321
/*
 * VECTOR FIND ANY ELEMENT EQUAL.  When m5 bit 0 (CS) is set, the
 * CC-setting helper variant is used and the CC is marked static.
 */
static DisasJumpType op_vfae(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfae8,
        gen_helper_gvec_vfae16,
        gen_helper_gvec_vfae32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfae_cc8,
        gen_helper_gvec_vfae_cc16,
        gen_helper_gvec_vfae_cc32,
    };
    /* only byte/halfword/word element sizes are valid */
    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        /* m5 is passed as simd data so the helper sees the other flags */
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
2351
/*
 * VECTOR FIND ELEMENT EQUAL.  m5 only defines bits 0 (CS) and 1 (ZS);
 * other bits are rejected.  With CS set, the CC-setting helper variant
 * is used and the CC is marked static.
 */
static DisasJumpType op_vfee(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfee8,
        gen_helper_gvec_vfee16,
        gen_helper_gvec_vfee32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfee_cc8,
        gen_helper_gvec_vfee_cc16,
        gen_helper_gvec_vfee_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
2382
/*
 * VECTOR FIND ELEMENT NOT EQUAL.  m5 only defines bits 0 (CS) and
 * 1 (ZS); other bits are rejected.  With CS set, the CC-setting helper
 * variant is used and the CC is marked static.
 */
static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfene8,
        gen_helper_gvec_vfene16,
        gen_helper_gvec_vfene32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfene_cc8,
        gen_helper_gvec_vfene_cc16,
        gen_helper_gvec_vfene_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
2413
/*
 * VECTOR ISOLATE STRING.  m5 only defines bit 0 (CS); other bits are
 * rejected.  With CS set, the CC-setting helper variant is used and the
 * CC is marked static.
 */
static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_2 * const g[3] = {
        gen_helper_gvec_vistr8,
        gen_helper_gvec_vistr16,
        gen_helper_gvec_vistr32,
    };
    static gen_helper_gvec_2_ptr * const g_cc[3] = {
        gen_helper_gvec_vistr_cc8,
        gen_helper_gvec_vistr_cc16,
        gen_helper_gvec_vistr_cc32,
    };

    if (es > ES_32 || m5 & ~0x1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0,
                       g[es]);
    }
    return DISAS_NEXT;
}
2444
/*
 * VECTOR STRING RANGE COMPARE.  Four helper variants cover the
 * combinations of m6 bit 0 (CS: also set the CC) and m6 bit 2
 * (RT: result-type variant).
 */
static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    static gen_helper_gvec_4 * const g[3] = {
        gen_helper_gvec_vstrc8,
        gen_helper_gvec_vstrc16,
        gen_helper_gvec_vstrc32,
    };
    static gen_helper_gvec_4 * const g_rt[3] = {
        gen_helper_gvec_vstrc_rt8,
        gen_helper_gvec_vstrc_rt16,
        gen_helper_gvec_vstrc_rt32,
    };
    static gen_helper_gvec_4_ptr * const g_cc[3] = {
        gen_helper_gvec_vstrc_cc8,
        gen_helper_gvec_vstrc_cc16,
        gen_helper_gvec_vstrc_cc32,
    };
    static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
        gen_helper_gvec_vstrc_cc_rt8,
        gen_helper_gvec_vstrc_cc_rt16,
        gen_helper_gvec_vstrc_cc_rt32,
    };

    /* only byte/halfword/word element sizes are valid */
    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m6, 0, 1)) {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc_rt[es]);
        } else {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc[es]);
        }
        set_cc_static(s);
    } else {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g_rt[es]);
        } else {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g[es]);
        }
    }
    return DISAS_NEXT;
}
2499
/*
 * Vector FP add/divide/multiply/subtract (selected by op2: 0xe3 vfa,
 * 0xe5 vfd, 0xe7 vfm, 0xe2 vfs helpers).  Short and extended formats
 * require the vector-enhancements facility; long is always available.
 * m5 is forwarded to the helper as simd data; its low 3 bits must be 0.
 */
static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    gen_helper_gvec_3_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0xe3: /* add (vfa helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfa32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfa64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfa128;
            }
            break;
        default:
            break;
        }
        break;
    case 0xe5: /* divide (vfd helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfd32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfd64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfd128;
            }
            break;
        default:
            break;
        }
        break;
    case 0xe7: /* multiply (vfm helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfm32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfm64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfm128;
            }
            break;
        default:
            break;
        }
        break;
    case 0xe2: /* subtract (vfs helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfs32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfs64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfs128;
            }
            break;
        default:
            break;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* fn == NULL means invalid format or missing facility */
    if (!fn || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, m5, fn);
    return DISAS_NEXT;
}
2596
/*
 * Scalar FP compare (wfk helpers; op2 0xcb selects the wfc variants
 * instead).  Short and extended formats require the vector-enhancements
 * facility.  m4 must be 0.  The helper sets the CC.
 */
static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = NULL;

    switch (fpf) {
    case FPF_SHORT:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = gen_helper_gvec_wfk32;
            if (s->fields.op2 == 0xcb) {
                fn = gen_helper_gvec_wfc32;
            }
        }
        break;
    case FPF_LONG:
        fn = gen_helper_gvec_wfk64;
        if (s->fields.op2 == 0xcb) {
            fn = gen_helper_gvec_wfc64;
        }
        break;
    case FPF_EXT:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            fn = gen_helper_gvec_wfk128;
            if (s->fields.op2 == 0xcb) {
                fn = gen_helper_gvec_wfc128;
            }
        }
        break;
    default:
        break;
    };

    /* fn == NULL means invalid format or missing facility */
    if (!fn || m4) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn);
    set_cc_static(s);
    return DISAS_NEXT;
}
2639
/*
 * Vector FP compare (op2 0xe8 vfce "equal", 0xeb vfch "high",
 * 0xea vfche "high or equal" helpers).  m6 bit 0 (cs) selects the
 * CC-setting helper variant; m5 bit 2 (sq) is the single-element
 * control.  Without the vector-enhancements facility, only the long
 * format without sq is valid.
 */
static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    const bool cs = extract32(m6, 0, 1);
    const bool sq = extract32(m5, 2, 1);
    gen_helper_gvec_3_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0xe8: /* compare equal */
        switch (fpf) {
        case FPF_SHORT:
            fn = cs ? gen_helper_gvec_vfce32_cc : gen_helper_gvec_vfce32;
            break;
        case FPF_LONG:
            fn = cs ? gen_helper_gvec_vfce64_cc : gen_helper_gvec_vfce64;
            break;
        case FPF_EXT:
            fn = cs ? gen_helper_gvec_vfce128_cc : gen_helper_gvec_vfce128;
            break;
        default:
            break;
        }
        break;
    case 0xeb: /* compare high */
        switch (fpf) {
        case FPF_SHORT:
            fn = cs ? gen_helper_gvec_vfch32_cc : gen_helper_gvec_vfch32;
            break;
        case FPF_LONG:
            fn = cs ? gen_helper_gvec_vfch64_cc : gen_helper_gvec_vfch64;
            break;
        case FPF_EXT:
            fn = cs ? gen_helper_gvec_vfch128_cc : gen_helper_gvec_vfch128;
            break;
        default:
            break;
        }
        break;
    case 0xea: /* compare high or equal */
        switch (fpf) {
        case FPF_SHORT:
            fn = cs ? gen_helper_gvec_vfche32_cc : gen_helper_gvec_vfche32;
            break;
        case FPF_LONG:
            fn = cs ? gen_helper_gvec_vfche64_cc : gen_helper_gvec_vfche64;
            break;
        case FPF_EXT:
            fn = cs ? gen_helper_gvec_vfche128_cc : gen_helper_gvec_vfche128;
            break;
        default:
            break;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* reject reserved m5/m6 bits and facility-gated format/sq combinations */
    if (!fn || extract32(m5, 0, 2) || extract32(m6, 1, 3) ||
        (!s390_has_feat(S390_FEAT_VECTOR_ENH) && (fpf != FPF_LONG || sq))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
                   cpu_env, m5, fn);
    if (cs) {
        set_cc_static(s);
    }
    return DISAS_NEXT;
}
2712
/*
 * Vector FP conversion/rounding, selected by op2: 0xc3 vcdg, 0xc1 vcdlg,
 * 0xc2 vcgd, 0xc0 vclgd (long format only), 0xc7 vfi, 0xc5 vflr
 * helpers.  m4 (low bits must be 0) and the effective rounding mode erm
 * are packed together into the helper's simd data.
 */
static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t erm = get_field(s, m5);
    gen_helper_gvec_2_ptr *fn = NULL;


    switch (s->fields.op2) {
    case 0xc3:
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcdg64;
        }
        break;
    case 0xc1:
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcdlg64;
        }
        break;
    case 0xc2:
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vcgd64;
        }
        break;
    case 0xc0:
        if (fpf == FPF_LONG) {
            fn = gen_helper_gvec_vclgd64;
        }
        break;
    case 0xc7: /* load fp integer (vfi helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfi32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfi64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfi128;
            }
            break;
        default:
            break;
        }
        break;
    case 0xc5: /* load rounded (vflr helpers) */
        switch (fpf) {
        case FPF_LONG:
            fn = gen_helper_gvec_vflr64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vflr128;
            }
            break;
        default:
            break;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* reserved m4 bits and rounding modes 2 and >7 are invalid */
    if (!fn || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   deposit32(m4, 4, 4, erm), fn);
    return DISAS_NEXT;
}
2788
2789static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
2790{
2791    const uint8_t fpf = get_field(s, m3);
2792    const uint8_t m4 = get_field(s, m4);
2793    gen_helper_gvec_2_ptr *fn = NULL;
2794
2795    switch (fpf) {
2796    case FPF_SHORT:
2797        fn = gen_helper_gvec_vfll32;
2798        break;
2799    case FPF_LONG:
2800        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
2801            fn = gen_helper_gvec_vfll64;
2802        }
2803        break;
2804    default:
2805        break;
2806    }
2807
2808    if (!fn || extract32(m4, 0, 3)) {
2809        gen_program_exception(s, PGM_SPECIFICATION);
2810        return DISAS_NORETURN;
2811    }
2812
2813    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
2814    return DISAS_NEXT;
2815}
2816
/*
 * Vector FP maximum/minimum (op2 0xef selects the vfmax helpers,
 * otherwise vfmin).  m6 encodes the comparison semantics; values
 * 5..7 and >13 are reserved.  m5 and m6 are packed into the helper's
 * simd data.
 */
static DisasJumpType op_vfmax(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m6 = get_field(s, m6);
    const uint8_t m5 = get_field(s, m5);
    gen_helper_gvec_3_ptr *fn;

    if (m6 == 5 || m6 == 6 || m6 == 7 || m6 > 13) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (fpf) {
    case FPF_SHORT:
        if (s->fields.op2 == 0xef) {
            fn = gen_helper_gvec_vfmax32;
        } else {
            fn = gen_helper_gvec_vfmin32;
        }
        break;
    case FPF_LONG:
        if (s->fields.op2 == 0xef) {
            fn = gen_helper_gvec_vfmax64;
        } else {
            fn = gen_helper_gvec_vfmin64;
        }
        break;
    case FPF_EXT:
        if (s->fields.op2 == 0xef) {
            fn = gen_helper_gvec_vfmax128;
        } else {
            fn = gen_helper_gvec_vfmin128;
        }
        break;
    default:
        /* any other floating-point format is invalid */
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2), get_field(s, v3),
                   cpu_env, deposit32(m5, 4, 4, m6), fn);
    return DISAS_NEXT;
}
2860
/*
 * Vector FP multiply-and-add family, selected by op2: 0x8f vfma,
 * 0x8e vfms, 0x9f vfnma, 0x9e vfnms helpers.  For vfma/vfms, short and
 * extended formats require the vector-enhancements facility.  m5's low
 * 3 bits must be 0; m5 is forwarded to the helper as simd data.
 */
static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
{
    const uint8_t m5 = get_field(s, m5);
    const uint8_t fpf = get_field(s, m6);
    gen_helper_gvec_4_ptr *fn = NULL;

    switch (s->fields.op2) {
    case 0x8f: /* multiply and add (vfma helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfma32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfma64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfma128;
            }
            break;
        default:
            break;
        }
        break;
    case 0x8e: /* multiply and subtract (vfms helpers) */
        switch (fpf) {
        case FPF_SHORT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfms32;
            }
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfms64;
            break;
        case FPF_EXT:
            if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
                fn = gen_helper_gvec_vfms128;
            }
            break;
        default:
            break;
        }
        break;
    case 0x9f: /* negative multiply and add (vfnma helpers) */
        switch (fpf) {
        case FPF_SHORT:
            fn = gen_helper_gvec_vfnma32;
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfnma64;
            break;
        case FPF_EXT:
            fn = gen_helper_gvec_vfnma128;
            break;
        default:
            break;
        }
        break;
    case 0x9e: /* negative multiply and subtract (vfnms helpers) */
        switch (fpf) {
        case FPF_SHORT:
            fn = gen_helper_gvec_vfnms32;
            break;
        case FPF_LONG:
            fn = gen_helper_gvec_vfnms64;
            break;
        case FPF_EXT:
            fn = gen_helper_gvec_vfnms128;
            break;
        default:
            break;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* fn == NULL means invalid format or missing facility */
    if (!fn || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4), cpu_env, m5, fn);
    return DISAS_NEXT;
}
2949
/*
 * VECTOR FP PERFORM SIGN OPERATION (VFPSO): manipulate only the sign bit
 * of each FP element of v2 and store the result in v1.
 *
 * m3 (fpf) selects the FP format, m5 selects the operation:
 *   0 - invert the sign bit (complement)
 *   1 - set the sign bit (force negative)
 *   2 - clear the sign bit (force positive)
 * Bit 3 of m4 ("se") restricts the operation to a single element.
 * Short/extended formats require the vector-enhancements facility.
 */
static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const bool se = extract32(m4, 3, 1);
    TCGv_i64 tmp;

    /*
     * Specification exception for: non-long format without the
     * vector-enhancements facility, reserved m4 bits, or m5 > 2.
     */
    if ((fpf != FPF_LONG && !s390_has_feat(S390_FEAT_VECTOR_ENH)) ||
        extract32(m4, 0, 3) || m5 > 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (fpf) {
    case FPF_SHORT:
        if (!se) {
            /* full vector: flip/set/clear bit 31 of each 32-bit element */
            switch (m5) {
            case 0:
                /* sign bit is inverted (complement) */
                gen_gvec_fn_2i(xori, ES_32, v1, v2, 1ull << 31);
                break;
            case 1:
                /* sign bit is set to one (negative) */
                gen_gvec_fn_2i(ori, ES_32, v1, v2, 1ull << 31);
                break;
            case 2:
                /* sign bit is set to zero (positive) */
                gen_gvec_fn_2i(andi, ES_32, v1, v2, (1ull << 31) - 1);
                break;
            }
            return DISAS_NEXT;
        }
        break;
    case FPF_LONG:
        if (!se) {
            /* full vector: flip/set/clear bit 63 of each 64-bit element */
            switch (m5) {
            case 0:
                /* sign bit is inverted (complement) */
                gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
                break;
            case 1:
                /* sign bit is set to one (negative) */
                gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
                break;
            case 2:
                /* sign bit is set to zero (positive) */
                gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
                break;
            }
            return DISAS_NEXT;
        }
        break;
    case FPF_EXT:
        /* Only a single element. */
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * Single-element path (se set, or 128-bit format).  The sign bit
     * lives in the topmost bit of the first doubleword for all formats,
     * so operating on element 0 as ES_64 covers short/long/ext alike.
     */
    /* With a single element, we are only interested in bit 0. */
    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, v2, 0, ES_64);
    switch (m5) {
    case 0:
        /* sign bit is inverted (complement) */
        tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
        break;
    case 1:
        /* sign bit is set to one (negative) */
        tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
        break;
    case 2:
        /* sign bit is set to zero (positive) */
        tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
        break;
    }
    write_vec_element_i64(tmp, v1, 0, ES_64);

    if (fpf == FPF_EXT) {
        /* 128-bit element: copy the untouched low doubleword as well */
        read_vec_element_i64(tmp, v2, 1, ES_64);
        write_vec_element_i64(tmp, v1, 1, ES_64);
    }

    tcg_temp_free_i64(tmp);

    return DISAS_NEXT;
}
3041
3042static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
3043{
3044    const uint8_t fpf = get_field(s, m3);
3045    const uint8_t m4 = get_field(s, m4);
3046    gen_helper_gvec_2_ptr *fn = NULL;
3047
3048    switch (fpf) {
3049    case FPF_SHORT:
3050        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
3051            fn = gen_helper_gvec_vfsq32;
3052        }
3053        break;
3054    case FPF_LONG:
3055        fn = gen_helper_gvec_vfsq64;
3056        break;
3057    case FPF_EXT:
3058        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
3059            fn = gen_helper_gvec_vfsq128;
3060        }
3061        break;
3062    default:
3063        break;
3064    }
3065
3066    if (!fn || extract32(m4, 0, 3)) {
3067        gen_program_exception(s, PGM_SPECIFICATION);
3068        return DISAS_NORETURN;
3069    }
3070
3071    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, m4, fn);
3072    return DISAS_NEXT;
3073}
3074
3075static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
3076{
3077    const uint16_t i3 = get_field(s, i3);
3078    const uint8_t fpf = get_field(s, m4);
3079    const uint8_t m5 = get_field(s, m5);
3080    gen_helper_gvec_2_ptr *fn = NULL;
3081
3082    switch (fpf) {
3083    case FPF_SHORT:
3084        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
3085            fn = gen_helper_gvec_vftci32;
3086        }
3087        break;
3088    case FPF_LONG:
3089        fn = gen_helper_gvec_vftci64;
3090        break;
3091    case FPF_EXT:
3092        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
3093            fn = gen_helper_gvec_vftci128;
3094        }
3095        break;
3096    default:
3097        break;
3098    }
3099
3100    if (!fn || extract32(m5, 0, 3)) {
3101        gen_program_exception(s, PGM_SPECIFICATION);
3102        return DISAS_NORETURN;
3103    }
3104
3105    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
3106                   deposit32(m5, 4, 12, i3), fn);
3107    set_cc_static(s);
3108    return DISAS_NEXT;
3109}
3110