/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
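
/*
 * Illustrative sketch (not part of the decoder): r1, m1, b1, i1 and v1 all
 * share compact slot 0, because no instruction format uses more than one
 * of them at the same time.  Their original indices stay distinct, so the
 * presentO bitmap still records which field was actually decoded:
 *
 *     f->c[FLD_C_r1] = 5;                        // store r1 = 5 in slot 0
 *     f->presentO |= 1 << FLD_O_r1;              // mark r1 as available
 *     assert(!((f->presentO >> FLD_O_m1) & 1));  // m1 remains absent
 */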

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
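
/*
 * Worked example (illustrative values): with pc == 0x2000 the link info is
 *   - 64-bit mode: out = 0x0000000000002000 (the full address)
 *   - 31-bit mode: low 32 bits of out = 0x80002000 (addressing-mode bit set)
 *   - 24-bit mode: low 32 bits of out = 0x00002000
 * In the 24- and 31-bit cases the high half of OUT is left untouched.
 */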

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(tcg_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(tcg_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(tcg_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(tcg_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the low 8 bytes and vregs[n][1] the high 8 bytes
     * of the 16-byte vector, on both little- and big-endian hosts.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little-endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8-byte elements have to be loaded separately.  Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
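
/*
 * Worked example: for es == MO_32 (bytes == 4) and enr == 1, offs starts
 * out as 4.  On a big-endian host that is already the right offset.  On a
 * little-endian host, offs ^= (8 - 4) yields 0, matching the
 * "W: [1][0] - [3][2]" layout in the diagram above.
 */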

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, tcg_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, tcg_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, tcg_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(tcg_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(tcg_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(tcg_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), tcg_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), tcg_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(tcg_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
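
/*
 * E.g. in 24-bit mode (neither FLAG_MASK_64 nor FLAG_MASK_32 set), adding
 * imm == -8 to a base of 0x4 gives 0xfffffffffffffffc before the wrap;
 * the AND with 0x00ffffff turns that into 0x00fffffc, the correctly
 * wrapped 24-bit address.
 */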

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, tcg_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, tcg_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
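
/*
 * E.g. a branch mask of 10 (CC0|CC2, "branch on equal or high") indexes
 * the "EQ |    | GT" row and yields TCG_COND_GE, so the branch becomes a
 * direct signed comparison of the two saved operands instead of a full
 * CC computation.
 */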

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_TSTEQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_TSTNE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;

    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;

        /* Fold half of the cases using bit 3 to invert. */
        switch (mask & 8 ? mask ^ 0xf : mask) {
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_TSTNE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
            cond = TCG_COND_LEU;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(1);
            tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        default:
            /* case 0: never, handled above. */
            g_assert_not_reached();
        }
        if (mask & 8) {
            cond = tcg_invert_cond(cond);
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
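
/*
 * Usage sketch: get_field(s, r1) expands to
 * get_field1(s, FLD_O_r1, FLD_C_r1), i.e. it asserts that availability bit
 * FLD_O_r1 is set in presentO and then reads compact slot
 * s->fields.c[FLD_C_r1].
 */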

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
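
/*
 * Illustrative expansion: BXD(2) describes the classic base-index-
 * displacement operand of an RX-format instruction as three fields:
 *   { 16,  4, 0, FLD_C_b2, FLD_O_b2 }   bits 16..19: base register b2
 *   { 12,  4, 0, FLD_C_x2, FLD_O_x2 }   bits 12..15: index register x2
 *   { 20, 12, 0, FLD_C_d2, FLD_O_d2 }   bits 20..31: 12-bit displacement d2
 */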

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
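
/*
 * Worked example for the CC_OP_STATIC case above: logical addition sets
 * CC to 0 (zero, no carry), 1 (nonzero, no carry), 2 (zero, carry) or
 * 3 (nonzero, carry), so carry == cc >> 1 maps {0,1,2,3} to {0,0,1,1}.
 */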

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, tcg_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, tcg_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
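
/*
 * Worked example (an NIHH-like case; the exact insn->data encoding is per
 * the insn table): with size == 16 and shift == 48, mask is
 * 0xffff000000000000.  An immediate of 0x1234 in in2 becomes
 * t == 0x1234ffffffffffff, so the AND touches only bits 48..63 of in1 and
 * the CC is computed from those 16 bits alone.
 */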
1410 
1411 static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
1412 {
1413     tcg_gen_andc_i64(o->out, o->in1, o->in2);
1414     return DISAS_NEXT;
1415 }
1416 
1417 static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
1418 {
1419     tcg_gen_orc_i64(o->out, o->in1, o->in2);
1420     return DISAS_NEXT;
1421 }
1422 
1423 static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
1424 {
1425     tcg_gen_nand_i64(o->out, o->in1, o->in2);
1426     return DISAS_NEXT;
1427 }
1428 
1429 static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
1430 {
1431     tcg_gen_nor_i64(o->out, o->in1, o->in2);
1432     return DISAS_NEXT;
1433 }
1434 
1435 static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
1436 {
1437     tcg_gen_eqv_i64(o->out, o->in1, o->in2);
1438     return DISAS_NEXT;
1439 }
1440 
1441 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1442 {
1443     o->in1 = tcg_temp_new_i64();
1444 
1445     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1446         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1447     } else {
1448         /* Perform the atomic operation in memory. */
1449         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1450                                      s->insn->data);
1451     }
1452 
1453     /* Recompute also for atomic case: needed for setting CC. */
1454     tcg_gen_and_i64(o->out, o->in1, o->in2);
1455 
1456     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1457         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1458     }
1459     return DISAS_NEXT;
1460 }
1461 
1462 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1463 {
1464     pc_to_link_info(o->out, s, s->pc_tmp);
1465     if (o->in2) {
1466         tcg_gen_mov_i64(psw_addr, o->in2);
1467         per_branch(s, false);
1468         return DISAS_PC_UPDATED;
1469     } else {
1470         return DISAS_NEXT;
1471     }
1472 }
1473 
1474 static void save_link_info(DisasContext *s, DisasOps *o)
1475 {
1476     TCGv_i64 t;
1477 
1478     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1479         pc_to_link_info(o->out, s, s->pc_tmp);
1480         return;
1481     }
1482     gen_op_calc_cc(s);
1483     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1484     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1485     t = tcg_temp_new_i64();
1486     tcg_gen_shri_i64(t, psw_mask, 16);
1487     tcg_gen_andi_i64(t, t, 0x0f000000);
1488     tcg_gen_or_i64(o->out, o->out, t);
1489     tcg_gen_extu_i32_i64(t, cc_op);
1490     tcg_gen_shli_i64(t, t, 28);
1491     tcg_gen_or_i64(o->out, o->out, t);
1492 }
1493 
1494 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1495 {
1496     save_link_info(s, o);
1497     if (o->in2) {
1498         tcg_gen_mov_i64(psw_addr, o->in2);
1499         per_branch(s, false);
1500         return DISAS_PC_UPDATED;
1501     } else {
1502         return DISAS_NEXT;
1503     }
1504 }
1505 
1506 /*
1507  * Disassemble the target of a branch. The results are returned in a form
1508  * suitable for passing into help_branch():
1509  *
1510  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1511  *   branches, whose DisasContext *S contains the relative immediate field RI,
1512  *   are considered fixed. All the other branches are considered computed.
1513  * - int IMM is the value of RI.
1514  * - TCGv_i64 CDEST is the address of the computed target.
1515  */
1516 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1517     if (have_field(s, ri)) {                                                   \
1518         if (unlikely(s->ex_value)) {                                           \
1519             cdest = tcg_temp_new_i64();                                        \
1520             tcg_gen_ld_i64(cdest, tcg_env, offsetof(CPUS390XState, ex_target));\
1521             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1522             is_imm = false;                                                    \
1523         } else {                                                               \
1524             is_imm = true;                                                     \
1525         }                                                                      \
1526     } else {                                                                   \
1527         is_imm = false;                                                        \
1528     }                                                                          \
1529     imm = is_imm ? get_field(s, ri) : 0;                                       \
1530 } while (false)
1531 
1532 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1533 {
1534     DisasCompare c;
1535     bool is_imm;
1536     int imm;
1537 
1538     pc_to_link_info(o->out, s, s->pc_tmp);
1539 
1540     disas_jdest(s, i2, is_imm, imm, o->in2);
1541     disas_jcc(s, &c, 0xf);
1542     return help_branch(s, &c, is_imm, imm, o->in2);
1543 }
1544 
1545 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1546 {
1547     int m1 = get_field(s, m1);
1548     DisasCompare c;
1549     bool is_imm;
1550     int imm;
1551 
1552     /* BCR with R2 = 0 causes no branching */
1553     if (have_field(s, r2) && get_field(s, r2) == 0) {
1554         if (m1 == 14) {
1555             /* Perform serialization */
1556             /* FIXME: check for fast-BCR-serialization facility */
1557             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1558         }
1559         if (m1 == 15) {
1560             /* Perform serialization */
1561             /* FIXME: perform checkpoint-synchronisation */
1562             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1563         }
1564         return DISAS_NEXT;
1565     }
1566 
1567     disas_jdest(s, i2, is_imm, imm, o->in2);
1568     disas_jcc(s, &c, m1);
1569     return help_branch(s, &c, is_imm, imm, o->in2);
1570 }
1571 
1572 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1573 {
1574     int r1 = get_field(s, r1);
1575     DisasCompare c;
1576     bool is_imm;
1577     TCGv_i64 t;
1578     int imm;
1579 
1580     c.cond = TCG_COND_NE;
1581     c.is_64 = false;
1582 
1583     t = tcg_temp_new_i64();
1584     tcg_gen_subi_i64(t, regs[r1], 1);
1585     store_reg32_i64(r1, t);
1586     c.u.s32.a = tcg_temp_new_i32();
1587     c.u.s32.b = tcg_constant_i32(0);
1588     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1589 
1590     disas_jdest(s, i2, is_imm, imm, o->in2);
1591     return help_branch(s, &c, is_imm, imm, o->in2);
1592 }
1593 
1594 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1595 {
1596     int r1 = get_field(s, r1);
1597     int imm = get_field(s, i2);
1598     DisasCompare c;
1599     TCGv_i64 t;
1600 
1601     c.cond = TCG_COND_NE;
1602     c.is_64 = false;
1603 
1604     t = tcg_temp_new_i64();
1605     tcg_gen_shri_i64(t, regs[r1], 32);
1606     tcg_gen_subi_i64(t, t, 1);
1607     store_reg32h_i64(r1, t);
1608     c.u.s32.a = tcg_temp_new_i32();
1609     c.u.s32.b = tcg_constant_i32(0);
1610     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1611 
1612     return help_branch(s, &c, 1, imm, o->in2);
1613 }
1614 
1615 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1616 {
1617     int r1 = get_field(s, r1);
1618     DisasCompare c;
1619     bool is_imm;
1620     int imm;
1621 
1622     c.cond = TCG_COND_NE;
1623     c.is_64 = true;
1624 
1625     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1626     c.u.s64.a = regs[r1];
1627     c.u.s64.b = tcg_constant_i64(0);
1628 
1629     disas_jdest(s, i2, is_imm, imm, o->in2);
1630     return help_branch(s, &c, is_imm, imm, o->in2);
1631 }
1632 
1633 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1634 {
1635     int r1 = get_field(s, r1);
1636     int r3 = get_field(s, r3);
1637     DisasCompare c;
1638     bool is_imm;
1639     TCGv_i64 t;
1640     int imm;
1641 
1642     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1643     c.is_64 = false;
1644 
1645     t = tcg_temp_new_i64();
1646     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1647     c.u.s32.a = tcg_temp_new_i32();
1648     c.u.s32.b = tcg_temp_new_i32();
1649     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1650     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1651     store_reg32_i64(r1, t);
1652 
1653     disas_jdest(s, i2, is_imm, imm, o->in2);
1654     return help_branch(s, &c, is_imm, imm, o->in2);
1655 }
1656 
1657 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1658 {
1659     int r1 = get_field(s, r1);
1660     int r3 = get_field(s, r3);
1661     DisasCompare c;
1662     bool is_imm;
1663     int imm;
1664 
1665     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1666     c.is_64 = true;
1667 
1668     if (r1 == (r3 | 1)) {
1669         c.u.s64.b = load_reg(r3 | 1);
1670     } else {
1671         c.u.s64.b = regs[r3 | 1];
1672     }
1673 
1674     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1675     c.u.s64.a = regs[r1];
1676 
1677     disas_jdest(s, i2, is_imm, imm, o->in2);
1678     return help_branch(s, &c, is_imm, imm, o->in2);
1679 }
1680 
1681 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1682 {
1683     int imm, m3 = get_field(s, m3);
1684     bool is_imm;
1685     DisasCompare c;
1686 
1687     c.cond = ltgt_cond[m3];
1688     if (s->insn->data) {
1689         c.cond = tcg_unsigned_cond(c.cond);
1690     }
1691     c.is_64 = true;
1692     c.u.s64.a = o->in1;
1693     c.u.s64.b = o->in2;
1694 
1695     o->out = NULL;
1696     disas_jdest(s, i4, is_imm, imm, o->out);
1697     if (!is_imm && !o->out) {
1698         imm = 0;
1699         o->out = get_address(s, 0, get_field(s, b4),
1700                              get_field(s, d4));
1701     }
1702 
1703     return help_branch(s, &c, is_imm, imm, o->out);
1704 }
1705 
1706 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1707 {
1708     gen_helper_ceb(cc_op, tcg_env, o->in1, o->in2);
1709     set_cc_static(s);
1710     return DISAS_NEXT;
1711 }
1712 
1713 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1714 {
1715     gen_helper_cdb(cc_op, tcg_env, o->in1, o->in2);
1716     set_cc_static(s);
1717     return DISAS_NEXT;
1718 }
1719 
1720 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1721 {
1722     gen_helper_cxb(cc_op, tcg_env, o->in1_128, o->in2_128);
1723     set_cc_static(s);
1724     return DISAS_NEXT;
1725 }
1726 
1727 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1728                                    bool m4_with_fpe)
1729 {
1730     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1731     uint8_t m3 = get_field(s, m3);
1732     uint8_t m4 = get_field(s, m4);
1733 
1734     /* m3 field was introduced with FPE */
1735     if (!fpe && m3_with_fpe) {
1736         m3 = 0;
1737     }
1738     /* m4 field was introduced with FPE */
1739     if (!fpe && m4_with_fpe) {
1740         m4 = 0;
1741     }
1742 
1743     /* Check for valid rounding modes. Mode 3 was introduced later. */
1744     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1745         gen_program_exception(s, PGM_SPECIFICATION);
1746         return NULL;
1747     }
1748 
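         /* Pack both fields into a single constant: m3 occupies bits 0-3
            and m4 bits 4-7, e.g. m3 = 5, m4 = 1 yields 0x15.  The helpers
            unpack the two fields again.  */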
1749     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1750 }
1751 
1752 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1753 {
1754     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1755 
1756     if (!m34) {
1757         return DISAS_NORETURN;
1758     }
1759     gen_helper_cfeb(o->out, tcg_env, o->in2, m34);
1760     set_cc_static(s);
1761     return DISAS_NEXT;
1762 }
1763 
1764 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1765 {
1766     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1767 
1768     if (!m34) {
1769         return DISAS_NORETURN;
1770     }
1771     gen_helper_cfdb(o->out, tcg_env, o->in2, m34);
1772     set_cc_static(s);
1773     return DISAS_NEXT;
1774 }
1775 
1776 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1777 {
1778     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1779 
1780     if (!m34) {
1781         return DISAS_NORETURN;
1782     }
1783     gen_helper_cfxb(o->out, tcg_env, o->in2_128, m34);
1784     set_cc_static(s);
1785     return DISAS_NEXT;
1786 }
1787 
1788 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1789 {
1790     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1791 
1792     if (!m34) {
1793         return DISAS_NORETURN;
1794     }
1795     gen_helper_cgeb(o->out, tcg_env, o->in2, m34);
1796     set_cc_static(s);
1797     return DISAS_NEXT;
1798 }
1799 
1800 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1801 {
1802     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1803 
1804     if (!m34) {
1805         return DISAS_NORETURN;
1806     }
1807     gen_helper_cgdb(o->out, tcg_env, o->in2, m34);
1808     set_cc_static(s);
1809     return DISAS_NEXT;
1810 }
1811 
1812 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1813 {
1814     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1815 
1816     if (!m34) {
1817         return DISAS_NORETURN;
1818     }
1819     gen_helper_cgxb(o->out, tcg_env, o->in2_128, m34);
1820     set_cc_static(s);
1821     return DISAS_NEXT;
1822 }
1823 
1824 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1825 {
1826     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1827 
1828     if (!m34) {
1829         return DISAS_NORETURN;
1830     }
1831     gen_helper_clfeb(o->out, tcg_env, o->in2, m34);
1832     set_cc_static(s);
1833     return DISAS_NEXT;
1834 }
1835 
1836 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1837 {
1838     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1839 
1840     if (!m34) {
1841         return DISAS_NORETURN;
1842     }
1843     gen_helper_clfdb(o->out, tcg_env, o->in2, m34);
1844     set_cc_static(s);
1845     return DISAS_NEXT;
1846 }
1847 
1848 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1849 {
1850     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1851 
1852     if (!m34) {
1853         return DISAS_NORETURN;
1854     }
1855     gen_helper_clfxb(o->out, tcg_env, o->in2_128, m34);
1856     set_cc_static(s);
1857     return DISAS_NEXT;
1858 }
1859 
1860 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1861 {
1862     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1863 
1864     if (!m34) {
1865         return DISAS_NORETURN;
1866     }
1867     gen_helper_clgeb(o->out, tcg_env, o->in2, m34);
1868     set_cc_static(s);
1869     return DISAS_NEXT;
1870 }
1871 
1872 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1873 {
1874     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1875 
1876     if (!m34) {
1877         return DISAS_NORETURN;
1878     }
1879     gen_helper_clgdb(o->out, tcg_env, o->in2, m34);
1880     set_cc_static(s);
1881     return DISAS_NEXT;
1882 }
1883 
1884 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1885 {
1886     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1887 
1888     if (!m34) {
1889         return DISAS_NORETURN;
1890     }
1891     gen_helper_clgxb(o->out, tcg_env, o->in2_128, m34);
1892     set_cc_static(s);
1893     return DISAS_NEXT;
1894 }
1895 
1896 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1897 {
1898     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1899 
1900     if (!m34) {
1901         return DISAS_NORETURN;
1902     }
1903     gen_helper_cegb(o->out, tcg_env, o->in2, m34);
1904     return DISAS_NEXT;
1905 }
1906 
1907 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1908 {
1909     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1910 
1911     if (!m34) {
1912         return DISAS_NORETURN;
1913     }
1914     gen_helper_cdgb(o->out, tcg_env, o->in2, m34);
1915     return DISAS_NEXT;
1916 }
1917 
1918 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1919 {
1920     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1921 
1922     if (!m34) {
1923         return DISAS_NORETURN;
1924     }
1925     gen_helper_cxgb(o->out_128, tcg_env, o->in2, m34);
1926     return DISAS_NEXT;
1927 }
1928 
1929 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1930 {
1931     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1932 
1933     if (!m34) {
1934         return DISAS_NORETURN;
1935     }
1936     gen_helper_celgb(o->out, tcg_env, o->in2, m34);
1937     return DISAS_NEXT;
1938 }
1939 
1940 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1941 {
1942     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1943 
1944     if (!m34) {
1945         return DISAS_NORETURN;
1946     }
1947     gen_helper_cdlgb(o->out, tcg_env, o->in2, m34);
1948     return DISAS_NEXT;
1949 }
1950 
1951 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1952 {
1953     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1954 
1955     if (!m34) {
1956         return DISAS_NORETURN;
1957     }
1958     gen_helper_cxlgb(o->out_128, tcg_env, o->in2, m34);
1959     return DISAS_NEXT;
1960 }
1961 
1962 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1963 {
1964     int r2 = get_field(s, r2);
1965     TCGv_i128 pair = tcg_temp_new_i128();
1966     TCGv_i64 len = tcg_temp_new_i64();
1967 
1968     gen_helper_cksm(pair, tcg_env, o->in1, o->in2, regs[r2 + 1]);
1969     set_cc_static(s);
1970     tcg_gen_extr_i128_i64(o->out, len, pair);
1971 
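         /* The helper consumed some number of bytes, returned in the
            high half of the pair; advance the address in r2 and shrink
            the remaining length in r2 + 1 by that amount.  */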
1972     tcg_gen_add_i64(regs[r2], regs[r2], len);
1973     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1974 
1975     return DISAS_NEXT;
1976 }
1977 
1978 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
1979 {
1980     int l = get_field(s, l1);
1981     TCGv_i64 src;
1982     TCGv_i32 vl;
1983     MemOp mop;
1984 
1985     switch (l + 1) {
1986     case 1:
1987     case 2:
1988     case 4:
1989     case 8:
1990         mop = ctz32(l + 1) | MO_TE;
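             /* ctz32(l + 1) maps the operand lengths 1/2/4/8 to
                MO_8/16/32/64 respectively.  */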
1991         /* Do not update cc_src yet: loading cc_dst may cause an exception. */
1992         src = tcg_temp_new_i64();
1993         tcg_gen_qemu_ld_tl(src, o->addr1, get_mem_index(s), mop);
1994         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
1995         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, src, cc_dst);
1996         return DISAS_NEXT;
1997     default:
1998         vl = tcg_constant_i32(l);
1999         gen_helper_clc(cc_op, tcg_env, vl, o->addr1, o->in2);
2000         set_cc_static(s);
2001         return DISAS_NEXT;
2002     }
2003 }
2004 
2005 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2006 {
2007     int r1 = get_field(s, r1);
2008     int r2 = get_field(s, r2);
2009     TCGv_i32 t1, t2;
2010 
2011     /* r1 and r2 must be even.  */
2012     if (r1 & 1 || r2 & 1) {
2013         gen_program_exception(s, PGM_SPECIFICATION);
2014         return DISAS_NORETURN;
2015     }
2016 
2017     t1 = tcg_constant_i32(r1);
2018     t2 = tcg_constant_i32(r2);
2019     gen_helper_clcl(cc_op, tcg_env, t1, t2);
2020     set_cc_static(s);
2021     return DISAS_NEXT;
2022 }
2023 
2024 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2025 {
2026     int r1 = get_field(s, r1);
2027     int r3 = get_field(s, r3);
2028     TCGv_i32 t1, t3;
2029 
2030     /* r1 and r3 must be even.  */
2031     if (r1 & 1 || r3 & 1) {
2032         gen_program_exception(s, PGM_SPECIFICATION);
2033         return DISAS_NORETURN;
2034     }
2035 
2036     t1 = tcg_constant_i32(r1);
2037     t3 = tcg_constant_i32(r3);
2038     gen_helper_clcle(cc_op, tcg_env, t1, o->in2, t3);
2039     set_cc_static(s);
2040     return DISAS_NEXT;
2041 }
2042 
2043 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2044 {
2045     int r1 = get_field(s, r1);
2046     int r3 = get_field(s, r3);
2047     TCGv_i32 t1, t3;
2048 
2049     /* r1 and r3 must be even.  */
2050     if (r1 & 1 || r3 & 1) {
2051         gen_program_exception(s, PGM_SPECIFICATION);
2052         return DISAS_NORETURN;
2053     }
2054 
2055     t1 = tcg_constant_i32(r1);
2056     t3 = tcg_constant_i32(r3);
2057     gen_helper_clclu(cc_op, tcg_env, t1, o->in2, t3);
2058     set_cc_static(s);
2059     return DISAS_NEXT;
2060 }
2061 
2062 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2063 {
2064     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2065     TCGv_i32 t1 = tcg_temp_new_i32();
2066 
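         /* CLM compares only the bytes of r1 selected by the m3 mask
            against successive bytes of the storage operand.  */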
2067     tcg_gen_extrl_i64_i32(t1, o->in1);
2068     gen_helper_clm(cc_op, tcg_env, t1, m3, o->in2);
2069     set_cc_static(s);
2070     return DISAS_NEXT;
2071 }
2072 
2073 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2074 {
2075     TCGv_i128 pair = tcg_temp_new_i128();
2076 
2077     gen_helper_clst(pair, tcg_env, regs[0], o->in1, o->in2);
2078     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2079 
2080     set_cc_static(s);
2081     return DISAS_NEXT;
2082 }
2083 
2084 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2085 {
2086     TCGv_i64 t = tcg_temp_new_i64();
2087     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2088     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2089     tcg_gen_or_i64(o->out, o->out, t);
2090     return DISAS_NEXT;
2091 }
2092 
2093 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2094 {
2095     int d2 = get_field(s, d2);
2096     int b2 = get_field(s, b2);
2097     TCGv_i64 addr, cc;
2098 
2099     /* Note that in1 = R3 (new value) and
2100        in2 = (zero-extended) R1 (expected value).  */
2101 
2102     addr = get_address(s, 0, b2, d2);
2103     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2104                                get_mem_index(s), s->insn->data | MO_ALIGN);
2105 
2106     /* Are the memory and expected values (un)equal?  Note that this setcond
2107        produces the output CC value, thus the NE sense of the test.  */
2108     cc = tcg_temp_new_i64();
2109     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2110     tcg_gen_extrl_i64_i32(cc_op, cc);
2111     set_cc_static(s);
2112 
2113     return DISAS_NEXT;
2114 }
2115 
2116 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2117 {
2118     int r1 = get_field(s, r1);
2119 
2120     o->out_128 = tcg_temp_new_i128();
2121     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2122 
2123     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2124     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2125                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2126 
2127     /*
2128      * Extract result into cc_dst:cc_src, compare vs the expected value
2129      * in the as yet unmodified input registers, then update CC_OP.
2130      */
2131     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2132     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2133     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2134     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2135     set_cc_nz_u64(s, cc_dst);
2136 
2137     return DISAS_NEXT;
2138 }
2139 
2140 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2141 {
2142     int r3 = get_field(s, r3);
2143     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2144 
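         /* If this TB may run in parallel with other vCPUs, the helper
            must perform the compare-and-swap-and-store atomically.  */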
2145     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2146         gen_helper_csst_parallel(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2147     } else {
2148         gen_helper_csst(cc_op, tcg_env, t_r3, o->addr1, o->in2);
2149     }
2150 
2151     set_cc_static(s);
2152     return DISAS_NEXT;
2153 }
2154 
2155 #ifndef CONFIG_USER_ONLY
2156 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2157 {
2158     MemOp mop = s->insn->data;
2159     TCGv_i64 addr, old, cc;
2160     TCGLabel *lab = gen_new_label();
2161 
2162     /* Note that in1 = R1 (zero-extended expected value),
2163        out = R1 (original reg), out2 = R1+1 (new value).  */
2164 
2165     addr = tcg_temp_new_i64();
2166     old = tcg_temp_new_i64();
2167     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2168     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2169                                get_mem_index(s), mop | MO_ALIGN);
2170 
2171     /* Are the memory and expected values (un)equal?  */
2172     cc = tcg_temp_new_i64();
2173     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2174     tcg_gen_extrl_i64_i32(cc_op, cc);
2175 
2176     /* Write back the output now, so that it happens before the
2177        following branch, so that we don't need local temps.  */
2178     if ((mop & MO_SIZE) == MO_32) {
2179         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2180     } else {
2181         tcg_gen_mov_i64(o->out, old);
2182     }
2183 
2184     /* If the comparison was equal, and the LSB of R2 was set,
2185        then we need to flush the TLB (for all cpus).  */
2186     tcg_gen_xori_i64(cc, cc, 1);
2187     tcg_gen_and_i64(cc, cc, o->in2);
2188     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2189 
2190     gen_helper_purge(tcg_env);
2191     gen_set_label(lab);
2192 
2193     return DISAS_NEXT;
2194 }
2195 #endif
2196 
2197 static DisasJumpType op_cvb(DisasContext *s, DisasOps *o)
2198 {
2199     TCGv_i64 t = tcg_temp_new_i64();
2200     tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TEUQ);
2201     gen_helper_cvb(tcg_env, tcg_constant_i32(get_field(s, r1)), t);
2202     return DISAS_NEXT;
2203 }
2204 
2205 static DisasJumpType op_cvbg(DisasContext *s, DisasOps *o)
2206 {
2207     TCGv_i128 t = tcg_temp_new_i128();
2208     tcg_gen_qemu_ld_i128(t, o->addr1, get_mem_index(s), MO_TE | MO_128);
2209     gen_helper_cvbg(o->out, tcg_env, t);
2210     return DISAS_NEXT;
2211 }
2212 
2213 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2214 {
2215     TCGv_i64 t1 = tcg_temp_new_i64();
2216     TCGv_i32 t2 = tcg_temp_new_i32();
2217     tcg_gen_extrl_i64_i32(t2, o->in1);
2218     gen_helper_cvd(t1, t2);
2219     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2220     return DISAS_NEXT;
2221 }
2222 
2223 static DisasJumpType op_cvdg(DisasContext *s, DisasOps *o)
2224 {
2225     TCGv_i128 t = tcg_temp_new_i128();
2226     gen_helper_cvdg(t, o->in1);
2227     tcg_gen_qemu_st_i128(t, o->in2, get_mem_index(s), MO_TE | MO_128);
2228     return DISAS_NEXT;
2229 }
2230 
2231 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2232 {
2233     int m3 = get_field(s, m3);
2234     TCGLabel *lab = gen_new_label();
2235     TCGCond c;
2236 
2237     c = tcg_invert_cond(ltgt_cond[m3]);
2238     if (s->insn->data) {
2239         c = tcg_unsigned_cond(c);
2240     }
2241     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2242 
2243     /* Trap.  */
2244     gen_trap(s);
2245 
2246     gen_set_label(lab);
2247     return DISAS_NEXT;
2248 }
2249 
2250 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2251 {
2252     int m3 = get_field(s, m3);
2253     int r1 = get_field(s, r1);
2254     int r2 = get_field(s, r2);
2255     TCGv_i32 tr1, tr2, chk;
2256 
2257     /* R1 and R2 must both be even.  */
2258     if ((r1 | r2) & 1) {
2259         gen_program_exception(s, PGM_SPECIFICATION);
2260         return DISAS_NORETURN;
2261     }
2262     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2263         m3 = 0;
2264     }
2265 
2266     tr1 = tcg_constant_i32(r1);
2267     tr2 = tcg_constant_i32(r2);
2268     chk = tcg_constant_i32(m3);
2269 
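         /* insn->data names the conversion: the digits 1, 2 and 4 stand
            for UTF-8, UTF-16 and UTF-32, source digit first, e.g. 21
            converts UTF-16 to UTF-8.  */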
2270     switch (s->insn->data) {
2271     case 12:
2272         gen_helper_cu12(cc_op, tcg_env, tr1, tr2, chk);
2273         break;
2274     case 14:
2275         gen_helper_cu14(cc_op, tcg_env, tr1, tr2, chk);
2276         break;
2277     case 21:
2278         gen_helper_cu21(cc_op, tcg_env, tr1, tr2, chk);
2279         break;
2280     case 24:
2281         gen_helper_cu24(cc_op, tcg_env, tr1, tr2, chk);
2282         break;
2283     case 41:
2284         gen_helper_cu41(cc_op, tcg_env, tr1, tr2, chk);
2285         break;
2286     case 42:
2287         gen_helper_cu42(cc_op, tcg_env, tr1, tr2, chk);
2288         break;
2289     default:
2290         g_assert_not_reached();
2291     }
2292 
2293     set_cc_static(s);
2294     return DISAS_NEXT;
2295 }
2296 
2297 #ifndef CONFIG_USER_ONLY
2298 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2299 {
2300     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2301     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2302     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2303 
2304     gen_helper_diag(tcg_env, r1, r3, func_code);
2305     return DISAS_NEXT;
2306 }
2307 #endif
2308 
2309 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2310 {
2311     gen_helper_divs32(o->out, tcg_env, o->in1, o->in2);
2312     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2313     return DISAS_NEXT;
2314 }
2315 
2316 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2317 {
2318     gen_helper_divu32(o->out, tcg_env, o->in1, o->in2);
2319     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2320     return DISAS_NEXT;
2321 }
2322 
2323 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2324 {
2325     TCGv_i128 t = tcg_temp_new_i128();
2326 
2327     gen_helper_divs64(t, tcg_env, o->in1, o->in2);
2328     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2329     return DISAS_NEXT;
2330 }
2331 
2332 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2333 {
2334     TCGv_i128 t = tcg_temp_new_i128();
2335 
2336     gen_helper_divu64(t, tcg_env, o->out, o->out2, o->in2);
2337     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2338     return DISAS_NEXT;
2339 }
2340 
2341 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2342 {
2343     gen_helper_deb(o->out, tcg_env, o->in1, o->in2);
2344     return DISAS_NEXT;
2345 }
2346 
2347 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2348 {
2349     gen_helper_ddb(o->out, tcg_env, o->in1, o->in2);
2350     return DISAS_NEXT;
2351 }
2352 
2353 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2354 {
2355     gen_helper_dxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
2356     return DISAS_NEXT;
2357 }
2358 
2359 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2360 {
2361     int r2 = get_field(s, r2);
2362     tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, aregs[r2]));
2363     return DISAS_NEXT;
2364 }
2365 
2366 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2367 {
2368     /* No cache information provided.  */
2369     tcg_gen_movi_i64(o->out, -1);
2370     return DISAS_NEXT;
2371 }
2372 
2373 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2374 {
2375     tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, fpc));
2376     return DISAS_NEXT;
2377 }
2378 
2379 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2380 {
2381     int r1 = get_field(s, r1);
2382     int r2 = get_field(s, r2);
2383     TCGv_i64 t = tcg_temp_new_i64();
2384     TCGv_i64 t_cc = tcg_temp_new_i64();
2385 
2386     /* Note the "subsequently" in the PoO, which implies a defined result
2387        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2388     gen_op_calc_cc(s);
2389     tcg_gen_extu_i32_i64(t_cc, cc_op);
2390     tcg_gen_shri_i64(t, psw_mask, 32);
2391     tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
2392     store_reg32_i64(r1, t);
2393     if (r2 != 0) {
2394         store_reg32_i64(r2, psw_mask);
2395     }
2396     return DISAS_NEXT;
2397 }
2398 
2399 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2400 {
2401     int r1 = get_field(s, r1);
2402     TCGv_i32 ilen;
2403     TCGv_i64 v1;
2404 
2405     /* Nested EXECUTE is not allowed.  */
2406     if (unlikely(s->ex_value)) {
2407         gen_program_exception(s, PGM_EXECUTE);
2408         return DISAS_NORETURN;
2409     }
2410 
2411     update_psw_addr(s);
2412     update_cc_op(s);
2413 
2414     if (r1 == 0) {
2415         v1 = tcg_constant_i64(0);
2416     } else {
2417         v1 = regs[r1];
2418     }
2419 
2420     ilen = tcg_constant_i32(s->ilen);
2421     gen_helper_ex(tcg_env, ilen, v1, o->in2);
2422 
2423     return DISAS_PC_CC_UPDATED;
2424 }
2425 
2426 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2427 {
2428     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2429 
2430     if (!m34) {
2431         return DISAS_NORETURN;
2432     }
2433     gen_helper_fieb(o->out, tcg_env, o->in2, m34);
2434     return DISAS_NEXT;
2435 }
2436 
2437 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2438 {
2439     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2440 
2441     if (!m34) {
2442         return DISAS_NORETURN;
2443     }
2444     gen_helper_fidb(o->out, tcg_env, o->in2, m34);
2445     return DISAS_NEXT;
2446 }
2447 
2448 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2449 {
2450     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2451 
2452     if (!m34) {
2453         return DISAS_NORETURN;
2454     }
2455     gen_helper_fixb(o->out_128, tcg_env, o->in2_128, m34);
2456     return DISAS_NEXT;
2457 }
2458 
2459 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2460 {
2461     /* We'll use the original input for cc computation, since we get to
2462        compare that against 0, which ought to be better than comparing
2463        the real output against 64.  It also lets cc_dst be a convenient
2464        temporary during our computation.  */
2465     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2466 
2467     /* R1 = IN ? CLZ(IN) : 64.  */
2468     tcg_gen_clzi_i64(o->out, o->in2, 64);
2469 
2470     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2471        value by 64, which is undefined.  But since the shift is 64 iff the
2472        input is zero, we still get the correct result after and'ing.  */
2473     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2474     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2475     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2476     return DISAS_NEXT;
2477 }
2478 
2479 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2480 {
2481     int m3 = get_field(s, m3);
2482     int pos, len, base = s->insn->data;
2483     TCGv_i64 tmp = tcg_temp_new_i64();
2484     uint64_t ccm;
2485 
2486     switch (m3) {
2487     case 0xf:
2488         /* Effectively a 32-bit load.  */
2489         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2490         len = 32;
2491         goto one_insert;
2492 
2493     case 0xc:
2494     case 0x6:
2495     case 0x3:
2496         /* Effectively a 16-bit load.  */
2497         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2498         len = 16;
2499         goto one_insert;
2500 
2501     case 0x8:
2502     case 0x4:
2503     case 0x2:
2504     case 0x1:
2505         /* Effectively an 8-bit load.  */
2506         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2507         len = 8;
2508         goto one_insert;
2509 
2510     one_insert:
2511         pos = base + ctz32(m3) * 8;
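             /* The mask bits are contiguous here, so a single deposit
                suffices; e.g. m3 == 0x6 inserts 16 bits at bit 8 of the
                selected word.  */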
2512         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2513         ccm = ((1ull << len) - 1) << pos;
2514         break;
2515 
2516     case 0:
2517         /* Recognize access exceptions for the first byte.  */
2518         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2519         gen_op_movi_cc(s, 0);
2520         return DISAS_NEXT;
2521 
2522     default:
2523         /* This is going to be a sequence of loads and inserts.  */
2524         pos = base + 32 - 8;
2525         ccm = 0;
2526         while (m3) {
2527             if (m3 & 0x8) {
2528                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2529                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2530                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2531                 ccm |= 0xffull << pos;
2532             }
2533             m3 = (m3 << 1) & 0xf;
2534             pos -= 8;
2535         }
2536         break;
2537     }
2538 
2539     tcg_gen_movi_i64(tmp, ccm);
2540     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2541     return DISAS_NEXT;
2542 }
2543 
2544 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2545 {
2546     int shift = s->insn->data & 0xff;
2547     int size = s->insn->data >> 8;
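         /* insn->data packs the deposit geometry as (size << 8) | shift.  */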
2548     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2549     return DISAS_NEXT;
2550 }
2551 
2552 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2553 {
2554     TCGv_i64 t1, t2;
2555 
2556     gen_op_calc_cc(s);
2557     t1 = tcg_temp_new_i64();
2558     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2559     t2 = tcg_temp_new_i64();
2560     tcg_gen_extu_i32_i64(t2, cc_op);
2561     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
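         /* t1 now holds (cc << 4) | program_mask; insert it as bits 24-31
            of r1, leaving the remaining bits of the output unchanged.  */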
2562     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2563     return DISAS_NEXT;
2564 }
2565 
2566 #ifndef CONFIG_USER_ONLY
2567 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2568 {
2569     TCGv_i32 m4;
2570 
2571     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2572         m4 = tcg_constant_i32(get_field(s, m4));
2573     } else {
2574         m4 = tcg_constant_i32(0);
2575     }
2576     gen_helper_idte(tcg_env, o->in1, o->in2, m4);
2577     return DISAS_NEXT;
2578 }
2579 
2580 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2581 {
2582     TCGv_i32 m4;
2583 
2584     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2585         m4 = tcg_constant_i32(get_field(s, m4));
2586     } else {
2587         m4 = tcg_constant_i32(0);
2588     }
2589     gen_helper_ipte(tcg_env, o->in1, o->in2, m4);
2590     return DISAS_NEXT;
2591 }
2592 
2593 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2594 {
2595     gen_helper_iske(o->out, tcg_env, o->in2);
2596     return DISAS_NEXT;
2597 }
2598 #endif
2599 
2600 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2601 {
2602     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2603     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2604     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2605     TCGv_i32 t_r1, t_r2, t_r3, type;
2606 
2607     switch (s->insn->data) {
2608     case S390_FEAT_TYPE_KMA:
2609         if (r3 == r1 || r3 == r2) {
2610             gen_program_exception(s, PGM_SPECIFICATION);
2611             return DISAS_NORETURN;
2612         }
2613         /* FALL THROUGH */
2614     case S390_FEAT_TYPE_KMCTR:
2615         if (r3 & 1 || !r3) {
2616             gen_program_exception(s, PGM_SPECIFICATION);
2617             return DISAS_NORETURN;
2618         }
2619         /* FALL THROUGH */
2620     case S390_FEAT_TYPE_PPNO:
2621     case S390_FEAT_TYPE_KMF:
2622     case S390_FEAT_TYPE_KMC:
2623     case S390_FEAT_TYPE_KMO:
2624     case S390_FEAT_TYPE_KM:
2625         if (r1 & 1 || !r1) {
2626             gen_program_exception(s, PGM_SPECIFICATION);
2627             return DISAS_NORETURN;
2628         }
2629         /* FALL THROUGH */
2630     case S390_FEAT_TYPE_KMAC:
2631     case S390_FEAT_TYPE_KIMD:
2632     case S390_FEAT_TYPE_KLMD:
2633         if (r2 & 1 || !r2) {
2634             gen_program_exception(s, PGM_SPECIFICATION);
2635             return DISAS_NORETURN;
2636         }
2637         /* FALL THROUGH */
2638     case S390_FEAT_TYPE_PCKMO:
2639     case S390_FEAT_TYPE_PCC:
2640         break;
2641     default:
2642         g_assert_not_reached();
2643     }
2644 
2645     t_r1 = tcg_constant_i32(r1);
2646     t_r2 = tcg_constant_i32(r2);
2647     t_r3 = tcg_constant_i32(r3);
2648     type = tcg_constant_i32(s->insn->data);
2649     gen_helper_msa(cc_op, tcg_env, t_r1, t_r2, t_r3, type);
2650     set_cc_static(s);
2651     return DISAS_NEXT;
2652 }
2653 
2654 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2655 {
2656     gen_helper_keb(cc_op, tcg_env, o->in1, o->in2);
2657     set_cc_static(s);
2658     return DISAS_NEXT;
2659 }
2660 
2661 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2662 {
2663     gen_helper_kdb(cc_op, tcg_env, o->in1, o->in2);
2664     set_cc_static(s);
2665     return DISAS_NEXT;
2666 }
2667 
2668 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2669 {
2670     gen_helper_kxb(cc_op, tcg_env, o->in1_128, o->in2_128);
2671     set_cc_static(s);
2672     return DISAS_NEXT;
2673 }
2674 
2675 static DisasJumpType help_laa(DisasContext *s, DisasOps *o, bool addu64)
2676 {
2677     /* The real output is indeed the original value in memory,
2678        as returned by the atomic fetch-and-add below.  */
2679     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2680                                  s->insn->data | MO_ALIGN);
2681     /* However, we need to recompute the addition for setting CC.  */
2682     if (addu64) {
2683         tcg_gen_movi_i64(cc_src, 0);
2684         tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
2685     } else {
2686         tcg_gen_add_i64(o->out, o->in1, o->in2);
2687     }
2688     return DISAS_NEXT;
2689 }
2690 
2691 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2692 {
2693     return help_laa(s, o, false);
2694 }
2695 
2696 static DisasJumpType op_laa_addu64(DisasContext *s, DisasOps *o)
2697 {
2698     return help_laa(s, o, true);
2699 }
2700 
2701 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2702 {
2703     /* The real output is indeed the original value in memory,
2704        as returned by the atomic fetch-and below.  */
2705     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2706                                  s->insn->data | MO_ALIGN);
2707     /* However, we need to recompute the operation for setting CC.  */
2708     tcg_gen_and_i64(o->out, o->in1, o->in2);
2709     return DISAS_NEXT;
2710 }
2711 
2712 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2713 {
2714     /* The real output is indeed the original value in memory,
2715        as returned by the atomic fetch-or below.  */
2716     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2717                                 s->insn->data | MO_ALIGN);
2718     /* However, we need to recompute the operation for setting CC.  */
2719     tcg_gen_or_i64(o->out, o->in1, o->in2);
2720     return DISAS_NEXT;
2721 }
2722 
2723 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2724 {
2725     /* The real output is indeed the original value in memory,
2726        as returned by the atomic fetch-xor below.  */
2727     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2728                                  s->insn->data | MO_ALIGN);
2729     /* However, we need to recompute the operation for setting CC.  */
2730     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2731     return DISAS_NEXT;
2732 }
2733 
2734 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2735 {
2736     gen_helper_ldeb(o->out, tcg_env, o->in2);
2737     return DISAS_NEXT;
2738 }
2739 
2740 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2741 {
2742     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2743 
2744     if (!m34) {
2745         return DISAS_NORETURN;
2746     }
2747     gen_helper_ledb(o->out, tcg_env, o->in2, m34);
2748     return DISAS_NEXT;
2749 }
2750 
2751 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2752 {
2753     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2754 
2755     if (!m34) {
2756         return DISAS_NORETURN;
2757     }
2758     gen_helper_ldxb(o->out, tcg_env, o->in2_128, m34);
2759     return DISAS_NEXT;
2760 }
2761 
2762 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2763 {
2764     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2765 
2766     if (!m34) {
2767         return DISAS_NORETURN;
2768     }
2769     gen_helper_lexb(o->out, tcg_env, o->in2_128, m34);
2770     return DISAS_NEXT;
2771 }
2772 
2773 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2774 {
2775     gen_helper_lxdb(o->out_128, tcg_env, o->in2);
2776     return DISAS_NEXT;
2777 }
2778 
2779 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2780 {
2781     gen_helper_lxeb(o->out_128, tcg_env, o->in2);
2782     return DISAS_NEXT;
2783 }
2784 
2785 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2786 {
2787     tcg_gen_shli_i64(o->out, o->in2, 32);
2788     return DISAS_NEXT;
2789 }
2790 
2791 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2792 {
2793     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2794     return DISAS_NEXT;
2795 }
2796 
2797 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2798 {
2799     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2800     return DISAS_NEXT;
2801 }
2802 
2803 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2804 {
2805     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2806     return DISAS_NEXT;
2807 }
2808 
2809 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2810 {
2811     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2812     return DISAS_NEXT;
2813 }
2814 
2815 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2816 {
2817     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2818     return DISAS_NEXT;
2819 }
2820 
2821 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2822 {
2823     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2824                        MO_TESL | s->insn->data);
2825     return DISAS_NEXT;
2826 }
2827 
2828 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2829 {
2830     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2831                        MO_TEUL | s->insn->data);
2832     return DISAS_NEXT;
2833 }
2834 
2835 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2836 {
2837     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2838                         MO_TEUQ | s->insn->data);
2839     return DISAS_NEXT;
2840 }
2841 
2842 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2843 {
2844     TCGLabel *lab = gen_new_label();
2845     store_reg32_i64(get_field(s, r1), o->in2);
2846     /* The value is stored even in case of trap. */
2847     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2848     gen_trap(s);
2849     gen_set_label(lab);
2850     return DISAS_NEXT;
2851 }
2852 
2853 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2854 {
2855     TCGLabel *lab = gen_new_label();
2856     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2857     /* The value is stored even in case of trap. */
2858     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2859     gen_trap(s);
2860     gen_set_label(lab);
2861     return DISAS_NEXT;
2862 }
2863 
2864 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2865 {
2866     TCGLabel *lab = gen_new_label();
2867     store_reg32h_i64(get_field(s, r1), o->in2);
2868     /* The value is stored even in case of trap. */
2869     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2870     gen_trap(s);
2871     gen_set_label(lab);
2872     return DISAS_NEXT;
2873 }
2874 
2875 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2876 {
2877     TCGLabel *lab = gen_new_label();
2878 
2879     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2880     /* The value is stored even in case of trap. */
2881     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2882     gen_trap(s);
2883     gen_set_label(lab);
2884     return DISAS_NEXT;
2885 }
2886 
2887 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2888 {
2889     TCGLabel *lab = gen_new_label();
2890     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2891     /* The value is stored even in case of trap. */
2892     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2893     gen_trap(s);
2894     gen_set_label(lab);
2895     return DISAS_NEXT;
2896 }
2897 
2898 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2899 {
2900     DisasCompare c;
2901 
2902     if (have_field(s, m3)) {
2903         /* LOAD * ON CONDITION */
2904         disas_jcc(s, &c, get_field(s, m3));
2905     } else {
2906         /* SELECT */
2907         disas_jcc(s, &c, get_field(s, m4));
2908     }
2909 
2910     if (c.is_64) {
2911         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2912                             o->in2, o->in1);
2913     } else {
2914         TCGv_i32 t32 = tcg_temp_new_i32();
2915         TCGv_i64 t, z;
2916 
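             /* Materialize the 32-bit condition as a 0/1 value and widen
                it, so that the final movcond can operate on 64 bits.  */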
2917         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2918 
2919         t = tcg_temp_new_i64();
2920         tcg_gen_extu_i32_i64(t, t32);
2921 
2922         z = tcg_constant_i64(0);
2923         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2924     }
2925 
2926     return DISAS_NEXT;
2927 }
2928 
2929 #ifndef CONFIG_USER_ONLY
2930 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2931 {
2932     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2933     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2934 
2935     gen_helper_lctl(tcg_env, r1, o->in2, r3);
2936     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2937     s->exit_to_mainloop = true;
2938     return DISAS_TOO_MANY;
2939 }
2940 
2941 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2942 {
2943     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2944     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2945 
2946     gen_helper_lctlg(tcg_env, r1, o->in2, r3);
2947     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2948     s->exit_to_mainloop = true;
2949     return DISAS_TOO_MANY;
2950 }
2951 
2952 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2953 {
2954     gen_helper_lra(o->out, tcg_env, o->out, o->in2);
2955     set_cc_static(s);
2956     return DISAS_NEXT;
2957 }
2958 
2959 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2960 {
2961     tcg_gen_st_i64(o->in2, tcg_env, offsetof(CPUS390XState, pp));
2962     return DISAS_NEXT;
2963 }
2964 
2965 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2966 {
2967     TCGv_i64 mask, addr;
2968 
2969     per_breaking_event(s);
2970 
2971     /*
2972      * Convert the short PSW into the normal PSW, similar to what
2973      * s390_cpu_load_normal() does.
2974      */
2975     mask = tcg_temp_new_i64();
2976     addr = tcg_temp_new_i64();
2977     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2978     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2979     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2980     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2981     gen_helper_load_psw(tcg_env, mask, addr);
2982     return DISAS_NORETURN;
2983 }
2984 
2985 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2986 {
2987     TCGv_i64 t1, t2;
2988 
2989     per_breaking_event(s);
2990 
2991     t1 = tcg_temp_new_i64();
2992     t2 = tcg_temp_new_i64();
2993     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2994                         MO_TEUQ | MO_ALIGN_8);
2995     tcg_gen_addi_i64(o->in2, o->in2, 8);
2996     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2997     gen_helper_load_psw(tcg_env, t1, t2);
2998     return DISAS_NORETURN;
2999 }
3000 #endif
3001 
3002 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3003 {
3004     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3005     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3006 
3007     gen_helper_lam(tcg_env, r1, o->in2, r3);
3008     return DISAS_NEXT;
3009 }
3010 
3011 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3012 {
3013     int r1 = get_field(s, r1);
3014     int r3 = get_field(s, r3);
3015     TCGv_i64 t1, t2;
3016 
3017     /* Only one register to read. */
3018     t1 = tcg_temp_new_i64();
3019     if (unlikely(r1 == r3)) {
3020         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3021         store_reg32_i64(r1, t1);
3022         return DISAS_NEXT;
3023     }
3024 
3025     /* First load the values of the first and last registers to trigger
3026        possible page faults. */
3027     t2 = tcg_temp_new_i64();
3028     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3029     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3030     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3031     store_reg32_i64(r1, t1);
3032     store_reg32_i64(r3, t2);
3033 
3034     /* Only two registers to read. */
3035     if (((r1 + 1) & 15) == r3) {
3036         return DISAS_NEXT;
3037     }
3038 
3039     /* Then load the remaining registers. Page fault can't occur. */
3040     r3 = (r3 - 1) & 15;
3041     tcg_gen_movi_i64(t2, 4);
3042     while (r1 != r3) {
3043         r1 = (r1 + 1) & 15;
3044         tcg_gen_add_i64(o->in2, o->in2, t2);
3045         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3046         store_reg32_i64(r1, t1);
3047     }
3048     return DISAS_NEXT;
3049 }
3050 
3051 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3052 {
3053     int r1 = get_field(s, r1);
3054     int r3 = get_field(s, r3);
3055     TCGv_i64 t1, t2;
3056 
3057     /* Only one register to read. */
3058     t1 = tcg_temp_new_i64();
3059     if (unlikely(r1 == r3)) {
3060         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3061         store_reg32h_i64(r1, t1);
3062         return DISAS_NEXT;
3063     }
3064 
3065     /* First load the values of the first and last registers to trigger
3066        possible page faults. */
3067     t2 = tcg_temp_new_i64();
3068     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3069     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3070     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3071     store_reg32h_i64(r1, t1);
3072     store_reg32h_i64(r3, t2);
3073 
3074     /* Only two registers to read. */
3075     if (((r1 + 1) & 15) == r3) {
3076         return DISAS_NEXT;
3077     }
3078 
3079     /* Then load the remaining registers. Page fault can't occur. */
3080     r3 = (r3 - 1) & 15;
3081     tcg_gen_movi_i64(t2, 4);
3082     while (r1 != r3) {
3083         r1 = (r1 + 1) & 15;
3084         tcg_gen_add_i64(o->in2, o->in2, t2);
3085         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3086         store_reg32h_i64(r1, t1);
3087     }
3088     return DISAS_NEXT;
3089 }
3090 
3091 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3092 {
3093     int r1 = get_field(s, r1);
3094     int r3 = get_field(s, r3);
3095     TCGv_i64 t1, t2;
3096 
3097     /* Only one register to read. */
3098     if (unlikely(r1 == r3)) {
3099         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3100         return DISAS_NEXT;
3101     }
3102 
3103     /* First load the values of the first and last registers to trigger
3104        possible page faults. */
3105     t1 = tcg_temp_new_i64();
3106     t2 = tcg_temp_new_i64();
3107     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3108     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3109     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3110     tcg_gen_mov_i64(regs[r1], t1);
3111 
3112     /* Only two registers to read. */
3113     if (((r1 + 1) & 15) == r3) {
3114         return DISAS_NEXT;
3115     }
3116 
3117     /* Then load the remaining registers. Page fault can't occur. */
3118     r3 = (r3 - 1) & 15;
3119     tcg_gen_movi_i64(t1, 8);
3120     while (r1 != r3) {
3121         r1 = (r1 + 1) & 15;
3122         tcg_gen_add_i64(o->in2, o->in2, t1);
3123         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3124     }
3125     return DISAS_NEXT;
3126 }
3127 
3128 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3129 {
3130     TCGv_i64 a1, a2;
3131     MemOp mop = s->insn->data;
3132 
3133     /* In a parallel context, stop the world and single step.  */
3134     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3135         update_psw_addr(s);
3136         update_cc_op(s);
3137         gen_exception(EXCP_ATOMIC);
3138         return DISAS_NORETURN;
3139     }
3140 
3141     /* In a serial context, perform the two loads ... */
3142     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3143     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3144     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3145     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3146 
3147     /* ... and indicate that we performed them while interlocked.  */
3148     gen_op_movi_cc(s, 0);
3149     return DISAS_NEXT;
3150 }
3151 
3152 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3153 {
3154     o->out_128 = tcg_temp_new_i128();
3155     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3156                          MO_TE | MO_128 | MO_ALIGN);
3157     return DISAS_NEXT;
3158 }
3159 
3160 #ifndef CONFIG_USER_ONLY
3161 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3162 {
3163     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3164     return DISAS_NEXT;
3165 }
3166 #endif
3167 
3168 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3169 {
3170     tcg_gen_andi_i64(o->out, o->in2, -256);
3171     return DISAS_NEXT;
3172 }
3173 
3174 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3175 {
3176     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3177 
3178     if (get_field(s, m3) > 6) {
3179         gen_program_exception(s, PGM_SPECIFICATION);
3180         return DISAS_NORETURN;
3181     }
3182 
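         /* Compute the distance to the next block boundary:
            -(addr | -block_size) == block_size - (addr % block_size).
            The result is then capped at 16 bytes, one vector register.  */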
3183     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3184     tcg_gen_neg_i64(o->addr1, o->addr1);
3185     tcg_gen_movi_i64(o->out, 16);
3186     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3187     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3188     return DISAS_NEXT;
3189 }
3190 
3191 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3192 {
3193     const uint8_t monitor_class = get_field(s, i2);
3194 
3195     if (monitor_class & 0xf0) {
3196         gen_program_exception(s, PGM_SPECIFICATION);
3197         return DISAS_NORETURN;
3198     }
3199 
3200 #if !defined(CONFIG_USER_ONLY)
3201     gen_helper_monitor_call(tcg_env, o->addr1,
3202                             tcg_constant_i32(monitor_class));
3203 #endif
3204     /* Defaults to a NOP. */
3205     return DISAS_NEXT;
3206 }
3207 
3208 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3209 {
3210     o->out = o->in2;
3211     o->in2 = NULL;
3212     return DISAS_NEXT;
3213 }
3214 
3215 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3216 {
3217     int b2 = get_field(s, b2);
3218     TCGv ar1 = tcg_temp_new_i64();
3219     int r1 = get_field(s, r1);
3220 
3221     o->out = o->in2;
3222     o->in2 = NULL;
3223 
3224     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3225     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3226         tcg_gen_movi_i64(ar1, 0);
3227         break;
3228     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3229         tcg_gen_movi_i64(ar1, 1);
3230         break;
3231     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3232         if (b2) {
3233             tcg_gen_ld32u_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[b2]));
3234         } else {
3235             tcg_gen_movi_i64(ar1, 0);
3236         }
3237         break;
3238     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3239         tcg_gen_movi_i64(ar1, 2);
3240         break;
3241     }
3242 
3243     tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1]));
3244     return DISAS_NEXT;
3245 }
3246 
3247 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3248 {
3249     o->out = o->in1;
3250     o->out2 = o->in2;
3251     o->in1 = NULL;
3252     o->in2 = NULL;
3253     return DISAS_NEXT;
3254 }
3255 
3256 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3257 {
3258     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3259 
3260     gen_helper_mvc(tcg_env, l, o->addr1, o->in2);
3261     return DISAS_NEXT;
3262 }
3263 
3264 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3265 {
3266     gen_helper_mvcrl(tcg_env, regs[0], o->addr1, o->in2);
3267     return DISAS_NEXT;
3268 }
3269 
3270 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3271 {
3272     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3273 
3274     gen_helper_mvcin(tcg_env, l, o->addr1, o->in2);
3275     return DISAS_NEXT;
3276 }
3277 
3278 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3279 {
3280     int r1 = get_field(s, r1);
3281     int r2 = get_field(s, r2);
3282     TCGv_i32 t1, t2;
3283 
3284     /* r1 and r2 must be even.  */
3285     if (r1 & 1 || r2 & 1) {
3286         gen_program_exception(s, PGM_SPECIFICATION);
3287         return DISAS_NORETURN;
3288     }
3289 
3290     t1 = tcg_constant_i32(r1);
3291     t2 = tcg_constant_i32(r2);
3292     gen_helper_mvcl(cc_op, tcg_env, t1, t2);
3293     set_cc_static(s);
3294     return DISAS_NEXT;
3295 }
3296 
3297 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3298 {
3299     int r1 = get_field(s, r1);
3300     int r3 = get_field(s, r3);
3301     TCGv_i32 t1, t3;
3302 
3303     /* r1 and r3 must be even.  */
3304     if (r1 & 1 || r3 & 1) {
3305         gen_program_exception(s, PGM_SPECIFICATION);
3306         return DISAS_NORETURN;
3307     }
3308 
3309     t1 = tcg_constant_i32(r1);
3310     t3 = tcg_constant_i32(r3);
3311     gen_helper_mvcle(cc_op, tcg_env, t1, o->in2, t3);
3312     set_cc_static(s);
3313     return DISAS_NEXT;
3314 }
3315 
3316 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3317 {
3318     int r1 = get_field(s, r1);
3319     int r3 = get_field(s, r3);
3320     TCGv_i32 t1, t3;
3321 
3322     /* r1 and r3 must be even.  */
3323     if (r1 & 1 || r3 & 1) {
3324         gen_program_exception(s, PGM_SPECIFICATION);
3325         return DISAS_NORETURN;
3326     }
3327 
3328     t1 = tcg_constant_i32(r1);
3329     t3 = tcg_constant_i32(r3);
3330     gen_helper_mvclu(cc_op, tcg_env, t1, o->in2, t3);
3331     set_cc_static(s);
3332     return DISAS_NEXT;
3333 }
3334 
3335 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3336 {
3337     int r3 = get_field(s, r3);
3338     gen_helper_mvcos(cc_op, tcg_env, o->addr1, o->in2, regs[r3]);
3339     set_cc_static(s);
3340     return DISAS_NEXT;
3341 }
3342 
3343 #ifndef CONFIG_USER_ONLY
3344 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3345 {
3346     int r1 = get_field(s, l1);
3347     int r3 = get_field(s, r3);
3348     gen_helper_mvcp(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3349     set_cc_static(s);
3350     return DISAS_NEXT;
3351 }
3352 
3353 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3354 {
3355     int r1 = get_field(s, l1);
3356     int r3 = get_field(s, r3);
3357     gen_helper_mvcs(cc_op, tcg_env, regs[r1], o->addr1, o->in2, regs[r3]);
3358     set_cc_static(s);
3359     return DISAS_NEXT;
3360 }
3361 #endif
3362 
3363 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3364 {
3365     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3366 
3367     gen_helper_mvn(tcg_env, l, o->addr1, o->in2);
3368     return DISAS_NEXT;
3369 }
3370 
3371 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3372 {
3373     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3374 
3375     gen_helper_mvo(tcg_env, l, o->addr1, o->in2);
3376     return DISAS_NEXT;
3377 }
3378 
3379 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3380 {
3381     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3382     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3383 
3384     gen_helper_mvpg(cc_op, tcg_env, regs[0], t1, t2);
3385     set_cc_static(s);
3386     return DISAS_NEXT;
3387 }
3388 
3389 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3390 {
3391     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3392     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3393 
3394     gen_helper_mvst(cc_op, tcg_env, t1, t2);
3395     set_cc_static(s);
3396     return DISAS_NEXT;
3397 }
3398 
3399 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3400 {
3401     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3402 
3403     gen_helper_mvz(tcg_env, l, o->addr1, o->in2);
3404     return DISAS_NEXT;
3405 }
3406 
3407 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3408 {
3409     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3410     return DISAS_NEXT;
3411 }
3412 
3413 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3414 {
3415     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3416     return DISAS_NEXT;
3417 }
3418 
3419 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3420 {
3421     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3422     return DISAS_NEXT;
3423 }
3424 
3425 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3426 {
3427     gen_helper_meeb(o->out, tcg_env, o->in1, o->in2);
3428     return DISAS_NEXT;
3429 }
3430 
3431 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3432 {
3433     gen_helper_mdeb(o->out, tcg_env, o->in1, o->in2);
3434     return DISAS_NEXT;
3435 }
3436 
3437 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3438 {
3439     gen_helper_mdb(o->out, tcg_env, o->in1, o->in2);
3440     return DISAS_NEXT;
3441 }
3442 
3443 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3444 {
3445     gen_helper_mxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3446     return DISAS_NEXT;
3447 }
3448 
3449 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3450 {
3451     gen_helper_mxdb(o->out_128, tcg_env, o->in1, o->in2);
3452     return DISAS_NEXT;
3453 }
3454 
3455 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3456 {
3457     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3458     gen_helper_maeb(o->out, tcg_env, o->in1, o->in2, r3);
3459     return DISAS_NEXT;
3460 }
3461 
3462 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3463 {
3464     TCGv_i64 r3 = load_freg(get_field(s, r3));
3465     gen_helper_madb(o->out, tcg_env, o->in1, o->in2, r3);
3466     return DISAS_NEXT;
3467 }
3468 
3469 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3470 {
3471     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3472     gen_helper_mseb(o->out, tcg_env, o->in1, o->in2, r3);
3473     return DISAS_NEXT;
3474 }
3475 
3476 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3477 {
3478     TCGv_i64 r3 = load_freg(get_field(s, r3));
3479     gen_helper_msdb(o->out, tcg_env, o->in1, o->in2, r3);
3480     return DISAS_NEXT;
3481 }
3482 
3483 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3484 {
3485     TCGv_i64 z = tcg_constant_i64(0);
3486     TCGv_i64 n = tcg_temp_new_i64();
3487 
3488     tcg_gen_neg_i64(n, o->in2);
3489     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3490     return DISAS_NEXT;
3491 }
3492 
3493 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3494 {
3495     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3496     return DISAS_NEXT;
3497 }
3498 
3499 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3500 {
3501     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3502     return DISAS_NEXT;
3503 }
3504 
3505 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3506 {
3507     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3508     tcg_gen_mov_i64(o->out2, o->in2);
3509     return DISAS_NEXT;
3510 }
3511 
3512 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3513 {
3514     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3515 
3516     gen_helper_nc(cc_op, tcg_env, l, o->addr1, o->in2);
3517     set_cc_static(s);
3518     return DISAS_NEXT;
3519 }
3520 
3521 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3522 {
3523     tcg_gen_neg_i64(o->out, o->in2);
3524     return DISAS_NEXT;
3525 }
3526 
3527 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3528 {
3529     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3530     return DISAS_NEXT;
3531 }
3532 
3533 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3534 {
3535     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3536     return DISAS_NEXT;
3537 }
3538 
3539 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3540 {
3541     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3542     tcg_gen_mov_i64(o->out2, o->in2);
3543     return DISAS_NEXT;
3544 }
3545 
3546 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3547 {
3548     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3549 
3550     gen_helper_oc(cc_op, tcg_env, l, o->addr1, o->in2);
3551     set_cc_static(s);
3552     return DISAS_NEXT;
3553 }
3554 
3555 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3556 {
3557     tcg_gen_or_i64(o->out, o->in1, o->in2);
3558     return DISAS_NEXT;
3559 }
3560 
3561 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3562 {
3563     int shift = s->insn->data & 0xff;
3564     int size = s->insn->data >> 8;
3565     uint64_t mask = ((1ull << size) - 1) << shift;
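         /*
          * Illustration (derived from the decoding above): insn->data packs
          * (size << 8) | shift.  A hypothetical 16-bit immediate targeting
          * register bits 0-15 would use size 16 and shift 48, giving mask
          * 0xffff000000000000.
          */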
3566     TCGv_i64 t = tcg_temp_new_i64();
3567 
3568     tcg_gen_shli_i64(t, o->in2, shift);
3569     tcg_gen_or_i64(o->out, o->in1, t);
3570 
3571     /* Produce the CC from only the bits manipulated.  */
3572     tcg_gen_andi_i64(cc_dst, o->out, mask);
3573     set_cc_nz_u64(s, cc_dst);
3574     return DISAS_NEXT;
3575 }
3576 
3577 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3578 {
3579     o->in1 = tcg_temp_new_i64();
3580 
3581     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3582         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3583     } else {
3584         /* Perform the atomic operation in memory. */
3585         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3586                                     s->insn->data);
3587     }
3588 
3589     /* Recompute the result also for the atomic case: needed to set the CC. */
3590     tcg_gen_or_i64(o->out, o->in1, o->in2);
3591 
3592     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3593         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3594     }
3595     return DISAS_NEXT;
3596 }
3597 
3598 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3599 {
3600     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3601 
3602     gen_helper_pack(tcg_env, l, o->addr1, o->in2);
3603     return DISAS_NEXT;
3604 }
3605 
3606 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3607 {
3608     int l2 = get_field(s, l2) + 1;
3609     TCGv_i32 l;
3610 
3611     /* The length must not exceed 32 bytes.  */
3612     if (l2 > 32) {
3613         gen_program_exception(s, PGM_SPECIFICATION);
3614         return DISAS_NORETURN;
3615     }
3616     l = tcg_constant_i32(l2);
3617     gen_helper_pka(tcg_env, o->addr1, o->in2, l);
3618     return DISAS_NEXT;
3619 }
3620 
3621 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3622 {
3623     int l2 = get_field(s, l2) + 1;
3624     TCGv_i32 l;
3625 
3626     /* The length must be even and must not exceed 64 bytes.  */
3627     if ((l2 & 1) || (l2 > 64)) {
3628         gen_program_exception(s, PGM_SPECIFICATION);
3629         return DISAS_NORETURN;
3630     }
3631     l = tcg_constant_i32(l2);
3632     gen_helper_pku(tcg_env, o->addr1, o->in2, l);
3633     return DISAS_NEXT;
3634 }
3635 
3636 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3637 {
3638     const uint8_t m3 = get_field(s, m3);
3639 
3640     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3641         tcg_gen_ctpop_i64(o->out, o->in2);
3642     } else {
3643         gen_helper_popcnt(o->out, o->in2);
3644     }
3645     return DISAS_NEXT;
3646 }
3647 
3648 #ifndef CONFIG_USER_ONLY
3649 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3650 {
3651     gen_helper_ptlb(tcg_env);
3652     return DISAS_NEXT;
3653 }
3654 #endif
3655 
3656 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3657 {
3658     int i3 = get_field(s, i3);
3659     int i4 = get_field(s, i4);
3660     int i5 = get_field(s, i5);
3661     int do_zero = i4 & 0x80;
3662     uint64_t mask, imask, pmask;
3663     int pos, len, rot;
3664 
3665     /* Adjust the arguments for the specific insn.  */
3666     switch (s->fields.op2) {
3667     case 0x55: /* risbg */
3668     case 0x59: /* risbgn */
3669         i3 &= 63;
3670         i4 &= 63;
3671         pmask = ~0;
3672         break;
3673     case 0x5d: /* risbhg */
3674         i3 &= 31;
3675         i4 &= 31;
3676         pmask = 0xffffffff00000000ull;
3677         break;
3678     case 0x51: /* risblg */
3679         i3 = (i3 & 31) + 32;
3680         i4 = (i4 & 31) + 32;
3681         pmask = 0x00000000ffffffffull;
3682         break;
3683     default:
3684         g_assert_not_reached();
3685     }
3686 
3687     /* MASK is the set of bits to be inserted from R2. */
3688     if (i3 <= i4) {
3689         /* [0...i3---i4...63] */
3690         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3691     } else {
3692         /* [0---i4...i3---63] */
3693         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3694     }
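         /*
          * Worked examples (illustrative values): i3 = 40, i4 = 47 gives
          * (-1 >> 40) & (-1 << 16) = 0x0000000000ff0000, i.e. bits 40-47 in
          * the PoO's left-to-right numbering; the wrapped case i3 = 56,
          * i4 = 7 gives (-1 >> 56) | (-1 << 56) = 0xff000000000000ff.
          */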
3695     /* For RISBLG/RISBHG, the wrapping is limited to the high/low word. */
3696     mask &= pmask;
3697 
3698     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3699        insns, we need to keep the other half of the register.  */
3700     imask = ~mask | ~pmask;
3701     if (do_zero) {
3702         imask = ~pmask;
3703     }
3704 
3705     len = i4 - i3 + 1;
3706     pos = 63 - i4;
3707     rot = i5 & 63;
3708 
3709     /* In some cases we can implement this with extract.  */
3710     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3711         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3712         return DISAS_NEXT;
3713     }
3714 
3715     /* In some cases we can implement this with deposit.  */
3716     if (len > 0 && (imask == 0 || ~mask == imask)) {
3717         /* Note that we rotate the bits to be inserted to the lsb, not to
3718            the position as described in the PoO.  */
3719         rot = (rot - pos) & 63;
3720     } else {
3721         pos = -1;
3722     }
3723 
3724     /* Rotate the input as necessary.  */
3725     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3726 
3727     /* Insert the selected bits into the output.  */
3728     if (pos >= 0) {
3729         if (imask == 0) {
3730             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3731         } else {
3732             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3733         }
3734     } else if (imask == 0) {
3735         tcg_gen_andi_i64(o->out, o->in2, mask);
3736     } else {
3737         tcg_gen_andi_i64(o->in2, o->in2, mask);
3738         tcg_gen_andi_i64(o->out, o->out, imask);
3739         tcg_gen_or_i64(o->out, o->out, o->in2);
3740     }
3741     return DISAS_NEXT;
3742 }
3743 
3744 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3745 {
3746     int i3 = get_field(s, i3);
3747     int i4 = get_field(s, i4);
3748     int i5 = get_field(s, i5);
3749     TCGv_i64 orig_out;
3750     uint64_t mask;
3751 
3752     /* If this is a test-only form, arrange to discard the result.  */
3753     if (i3 & 0x80) {
3754         tcg_debug_assert(o->out != NULL);
3755         orig_out = o->out;
3756         o->out = tcg_temp_new_i64();
3757         tcg_gen_mov_i64(o->out, orig_out);
3758     }
3759 
3760     i3 &= 63;
3761     i4 &= 63;
3762     i5 &= 63;
3763 
3764     /* MASK is the set of bits to be operated on from R2.
3765        Take care for I3/I4 wraparound.  */
3766     mask = ~0ull >> i3;
3767     if (i3 <= i4) {
3768         mask ^= ~0ull >> i4 >> 1;
3769     } else {
3770         mask |= ~(~0ull >> i4 >> 1);
3771     }
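         /* e.g. i3 = 8, i4 = 15 selects 0x00ff000000000000 (bits 8-15), while
            the wrapped case i3 = 60, i4 = 3 selects 0xf00000000000000f
            (bits 60-63 and 0-3).  */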
3772 
3773     /* Rotate the input as necessary.  */
3774     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3775 
3776     /* Operate.  */
3777     switch (s->fields.op2) {
3778     case 0x54: /* AND */
3779         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3780         tcg_gen_and_i64(o->out, o->out, o->in2);
3781         break;
3782     case 0x56: /* OR */
3783         tcg_gen_andi_i64(o->in2, o->in2, mask);
3784         tcg_gen_or_i64(o->out, o->out, o->in2);
3785         break;
3786     case 0x57: /* XOR */
3787         tcg_gen_andi_i64(o->in2, o->in2, mask);
3788         tcg_gen_xor_i64(o->out, o->out, o->in2);
3789         break;
3790     default:
3791         g_assert_not_reached();
3792     }
3793 
3794     /* Set the CC.  */
3795     tcg_gen_andi_i64(cc_dst, o->out, mask);
3796     set_cc_nz_u64(s, cc_dst);
3797     return DISAS_NEXT;
3798 }
3799 
3800 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3801 {
3802     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3803     return DISAS_NEXT;
3804 }
3805 
3806 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3807 {
3808     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3809     return DISAS_NEXT;
3810 }
3811 
3812 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3813 {
3814     tcg_gen_bswap64_i64(o->out, o->in2);
3815     return DISAS_NEXT;
3816 }
3817 
3818 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3819 {
3820     TCGv_i32 t1 = tcg_temp_new_i32();
3821     TCGv_i32 t2 = tcg_temp_new_i32();
3822     TCGv_i32 to = tcg_temp_new_i32();
3823     tcg_gen_extrl_i64_i32(t1, o->in1);
3824     tcg_gen_extrl_i64_i32(t2, o->in2);
3825     tcg_gen_rotl_i32(to, t1, t2);
3826     tcg_gen_extu_i32_i64(o->out, to);
3827     return DISAS_NEXT;
3828 }
3829 
3830 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3831 {
3832     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3833     return DISAS_NEXT;
3834 }
3835 
3836 #ifndef CONFIG_USER_ONLY
3837 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3838 {
3839     gen_helper_rrbe(cc_op, tcg_env, o->in2);
3840     set_cc_static(s);
3841     return DISAS_NEXT;
3842 }
3843 
3844 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3845 {
3846     gen_helper_sacf(tcg_env, o->in2);
3847     /* Addressing mode has changed, so end the block.  */
3848     return DISAS_TOO_MANY;
3849 }
3850 #endif
3851 
3852 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3853 {
3854     int sam = s->insn->data;
3855     TCGv_i64 tsam;
3856     uint64_t mask;
3857 
3858     switch (sam) {
3859     case 0:
3860         mask = 0xffffff;
3861         break;
3862     case 1:
3863         mask = 0x7fffffff;
3864         break;
3865     default:
3866         mask = -1;
3867         break;
3868     }
3869 
3870     /* Bizarre but true, we check the address of the current insn for the
3871        specification exception, not the next to be executed.  Thus the PoO
3872        documents that Bad Things Happen two bytes before the end.  */
3873     if (s->base.pc_next & ~mask) {
3874         gen_program_exception(s, PGM_SPECIFICATION);
3875         return DISAS_NORETURN;
3876     }
3877     s->pc_tmp &= mask;
3878 
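         /* The two-bit SAM code lands on the address-mode bits (EA and BA,
            PSW bits 31 and 32), which sit at positions 32 and 31 counting
            from the lsb of psw_mask.  */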
3879     tsam = tcg_constant_i64(sam);
3880     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3881 
3882     /* Always exit the TB, since we (may have) changed execution mode.  */
3883     return DISAS_TOO_MANY;
3884 }
3885 
3886 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3887 {
3888     int r1 = get_field(s, r1);
3889     tcg_gen_st32_i64(o->in2, tcg_env, offsetof(CPUS390XState, aregs[r1]));
3890     return DISAS_NEXT;
3891 }
3892 
3893 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3894 {
3895     gen_helper_seb(o->out, tcg_env, o->in1, o->in2);
3896     return DISAS_NEXT;
3897 }
3898 
3899 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3900 {
3901     gen_helper_sdb(o->out, tcg_env, o->in1, o->in2);
3902     return DISAS_NEXT;
3903 }
3904 
3905 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3906 {
3907     gen_helper_sxb(o->out_128, tcg_env, o->in1_128, o->in2_128);
3908     return DISAS_NEXT;
3909 }
3910 
3911 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3912 {
3913     gen_helper_sqeb(o->out, tcg_env, o->in2);
3914     return DISAS_NEXT;
3915 }
3916 
3917 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3918 {
3919     gen_helper_sqdb(o->out, tcg_env, o->in2);
3920     return DISAS_NEXT;
3921 }
3922 
3923 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3924 {
3925     gen_helper_sqxb(o->out_128, tcg_env, o->in2_128);
3926     return DISAS_NEXT;
3927 }
3928 
3929 #ifndef CONFIG_USER_ONLY
3930 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3931 {
3932     gen_helper_servc(cc_op, tcg_env, o->in2, o->in1);
3933     set_cc_static(s);
3934     return DISAS_NEXT;
3935 }
3936 
3937 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3938 {
3939     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3940     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3941 
3942     gen_helper_sigp(cc_op, tcg_env, o->in2, r1, r3);
3943     set_cc_static(s);
3944     return DISAS_NEXT;
3945 }
3946 #endif
3947 
3948 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3949 {
3950     DisasCompare c;
3951     TCGv_i64 a, h;
3952     TCGLabel *lab;
3953     int r1;
3954 
3955     disas_jcc(s, &c, get_field(s, m3));
3956 
3957     /* We want to store when the condition is fulfilled, so branch
3958        out when it's not.  */
3959     c.cond = tcg_invert_cond(c.cond);
3960 
3961     lab = gen_new_label();
3962     if (c.is_64) {
3963         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3964     } else {
3965         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3966     }
3967 
3968     r1 = get_field(s, r1);
3969     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3970     switch (s->insn->data) {
3971     case 1: /* STOCG */
3972         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3973         break;
3974     case 0: /* STOC */
3975         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3976         break;
3977     case 2: /* STOCFH */
3978         h = tcg_temp_new_i64();
3979         tcg_gen_shri_i64(h, regs[r1], 32);
3980         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3981         break;
3982     default:
3983         g_assert_not_reached();
3984     }
3985 
3986     gen_set_label(lab);
3987     return DISAS_NEXT;
3988 }
3989 
3990 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3991 {
3992     TCGv_i64 t;
3993     uint64_t sign = 1ull << s->insn->data;
3994     if (s->insn->data == 31) {
3995         t = tcg_temp_new_i64();
3996         tcg_gen_shli_i64(t, o->in1, 32);
3997     } else {
3998         t = o->in1;
3999     }
4000     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4001     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4002     /* The arithmetic left shift is curious in that it does not affect
4003        the sign bit.  Copy that over from the source unchanged.  */
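         /* e.g. a 32-bit SLA of 0x80000001 by 1: the plain shift gives
            0x00000002; clearing and re-inserting the sign yields
            0x80000002.  */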
4004     tcg_gen_andi_i64(o->out, o->out, ~sign);
4005     tcg_gen_andi_i64(o->in1, o->in1, sign);
4006     tcg_gen_or_i64(o->out, o->out, o->in1);
4007     return DISAS_NEXT;
4008 }
4009 
4010 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4011 {
4012     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4013     return DISAS_NEXT;
4014 }
4015 
4016 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4017 {
4018     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4019     return DISAS_NEXT;
4020 }
4021 
4022 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4023 {
4024     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4025     return DISAS_NEXT;
4026 }
4027 
4028 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4029 {
4030     gen_helper_sfpc(tcg_env, o->in2);
4031     return DISAS_NEXT;
4032 }
4033 
4034 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4035 {
4036     gen_helper_sfas(tcg_env, o->in2);
4037     return DISAS_NEXT;
4038 }
4039 
4040 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4041 {
4042     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4043     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4044     gen_helper_srnm(tcg_env, o->addr1);
4045     return DISAS_NEXT;
4046 }
4047 
4048 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4049 {
4050     /* Bits 0-55 are ignored. */
4051     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4052     gen_helper_srnm(tcg_env, o->addr1);
4053     return DISAS_NEXT;
4054 }
4055 
4056 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4057 {
4058     TCGv_i64 tmp = tcg_temp_new_i64();
4059 
4060     /* Bits other than 61-63 are ignored. */
4061     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4062 
4063     /* No need to call a helper, since we don't implement DFP. */
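         /* SRNMT only updates the DFP rounding-mode field of the FPC
            (bits 4-6 counting from the lsb, hence the deposit below), and
            there is no softfloat state to resynchronize.  */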
4064     tcg_gen_ld32u_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4065     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4066     tcg_gen_st32_i64(tmp, tcg_env, offsetof(CPUS390XState, fpc));
4067     return DISAS_NEXT;
4068 }
4069 
4070 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4071 {
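         /* The CC comes from bits 28-29 (lsb numbering) of the low word of
            r1; the program mask from bits 24-27.  */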
4072     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4073     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4074     set_cc_static(s);
4075 
4076     tcg_gen_shri_i64(o->in1, o->in1, 24);
4077     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4078     return DISAS_NEXT;
4079 }
4080 
4081 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4082 {
4083     int b1 = get_field(s, b1);
4084     int d1 = get_field(s, d1);
4085     int b2 = get_field(s, b2);
4086     int d2 = get_field(s, d2);
4087     int r3 = get_field(s, r3);
4088     TCGv_i64 tmp = tcg_temp_new_i64();
4089 
4090     /* fetch all operands first */
4091     o->in1 = tcg_temp_new_i64();
4092     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4093     o->in2 = tcg_temp_new_i64();
4094     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4095     o->addr1 = tcg_temp_new_i64();
4096     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4097 
4098     /* load the third operand into r3 before modifying anything */
4099     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4100 
4101     /* subtract CPU timer from first operand and store in GR0 */
4102     gen_helper_stpt(tmp, tcg_env);
4103     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4104 
4105     /* store second operand in GR1 */
4106     tcg_gen_mov_i64(regs[1], o->in2);
4107     return DISAS_NEXT;
4108 }
4109 
4110 #ifndef CONFIG_USER_ONLY
4111 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4112 {
4113     tcg_gen_shri_i64(o->in2, o->in2, 4);
4114     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4115     return DISAS_NEXT;
4116 }
4117 
4118 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4119 {
4120     gen_helper_sske(tcg_env, o->in1, o->in2);
4121     return DISAS_NEXT;
4122 }
4123 
4124 static void gen_check_psw_mask(DisasContext *s)
4125 {
4126     TCGv_i64 reserved = tcg_temp_new_i64();
4127     TCGLabel *ok = gen_new_label();
4128 
4129     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4130     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4131     gen_program_exception(s, PGM_SPECIFICATION);
4132     gen_set_label(ok);
4133 }
4134 
4135 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4136 {
4137     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4138 
4139     gen_check_psw_mask(s);
4140 
4141     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4142     s->exit_to_mainloop = true;
4143     return DISAS_TOO_MANY;
4144 }
4145 
4146 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4147 {
4148     tcg_gen_ld32u_i64(o->out, tcg_env, offsetof(CPUS390XState, core_id));
4149     return DISAS_NEXT;
4150 }
4151 #endif
4152 
4153 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4154 {
4155     gen_helper_stck(o->out, tcg_env);
4156     /* ??? We don't implement clock states.  */
4157     gen_op_movi_cc(s, 0);
4158     return DISAS_NEXT;
4159 }
4160 
4161 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4162 {
4163     TCGv_i64 c1 = tcg_temp_new_i64();
4164     TCGv_i64 c2 = tcg_temp_new_i64();
4165     TCGv_i64 todpr = tcg_temp_new_i64();
4166     gen_helper_stck(c1, tcg_env);
4167     /* 16-bit value stored in a uint32_t (only valid bits set) */
4168     tcg_gen_ld32u_i64(todpr, tcg_env, offsetof(CPUS390XState, todpr));
4169     /* Shift the 64-bit value into its place as a zero-extended
4170        104-bit value.  Note that "bit positions 64-103 are always
4171        non-zero so that they compare differently to STCK"; we set
4172        the least significant bit to 1.  */
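         /* Concretely: c1 = TOD >> 8 supplies a zero top byte plus TOD bits
            0-55; c2 carries TOD bits 56-63 in its top byte, the 0x10000
            marker (the 104-bit value's lsb) and the programmable field in
            its low 16 bits.  */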
4173     tcg_gen_shli_i64(c2, c1, 56);
4174     tcg_gen_shri_i64(c1, c1, 8);
4175     tcg_gen_ori_i64(c2, c2, 0x10000);
4176     tcg_gen_or_i64(c2, c2, todpr);
4177     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4178     tcg_gen_addi_i64(o->in2, o->in2, 8);
4179     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4180     /* ??? We don't implement clock states.  */
4181     gen_op_movi_cc(s, 0);
4182     return DISAS_NEXT;
4183 }
4184 
4185 #ifndef CONFIG_USER_ONLY
4186 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4187 {
4188     gen_helper_sck(cc_op, tcg_env, o->in2);
4189     set_cc_static(s);
4190     return DISAS_NEXT;
4191 }
4192 
4193 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4194 {
4195     gen_helper_sckc(tcg_env, o->in2);
4196     return DISAS_NEXT;
4197 }
4198 
4199 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4200 {
4201     gen_helper_sckpf(tcg_env, regs[0]);
4202     return DISAS_NEXT;
4203 }
4204 
4205 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4206 {
4207     gen_helper_stckc(o->out, tcg_env);
4208     return DISAS_NEXT;
4209 }
4210 
4211 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4212 {
4213     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4214     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4215 
4216     gen_helper_stctg(tcg_env, r1, o->in2, r3);
4217     return DISAS_NEXT;
4218 }
4219 
4220 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4221 {
4222     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4223     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4224 
4225     gen_helper_stctl(tcg_env, r1, o->in2, r3);
4226     return DISAS_NEXT;
4227 }
4228 
4229 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4230 {
4231     tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, cpuid));
4232     return DISAS_NEXT;
4233 }
4234 
4235 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4236 {
4237     gen_helper_spt(tcg_env, o->in2);
4238     return DISAS_NEXT;
4239 }
4240 
4241 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4242 {
4243     gen_helper_stfl(tcg_env);
4244     return DISAS_NEXT;
4245 }
4246 
4247 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4248 {
4249     gen_helper_stpt(o->out, tcg_env);
4250     return DISAS_NEXT;
4251 }
4252 
4253 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4254 {
4255     gen_helper_stsi(cc_op, tcg_env, o->in2, regs[0], regs[1]);
4256     set_cc_static(s);
4257     return DISAS_NEXT;
4258 }
4259 
4260 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4261 {
4262     gen_helper_spx(tcg_env, o->in2);
4263     return DISAS_NEXT;
4264 }
4265 
4266 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4267 {
4268     gen_helper_xsch(tcg_env, regs[1]);
4269     set_cc_static(s);
4270     return DISAS_NEXT;
4271 }
4272 
4273 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4274 {
4275     gen_helper_csch(tcg_env, regs[1]);
4276     set_cc_static(s);
4277     return DISAS_NEXT;
4278 }
4279 
4280 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4281 {
4282     gen_helper_hsch(tcg_env, regs[1]);
4283     set_cc_static(s);
4284     return DISAS_NEXT;
4285 }
4286 
4287 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4288 {
4289     gen_helper_msch(tcg_env, regs[1], o->in2);
4290     set_cc_static(s);
4291     return DISAS_NEXT;
4292 }
4293 
4294 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4295 {
4296     gen_helper_rchp(tcg_env, regs[1]);
4297     set_cc_static(s);
4298     return DISAS_NEXT;
4299 }
4300 
4301 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4302 {
4303     gen_helper_rsch(tcg_env, regs[1]);
4304     set_cc_static(s);
4305     return DISAS_NEXT;
4306 }
4307 
4308 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4309 {
4310     gen_helper_sal(tcg_env, regs[1]);
4311     return DISAS_NEXT;
4312 }
4313 
4314 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4315 {
4316     gen_helper_schm(tcg_env, regs[1], regs[2], o->in2);
4317     return DISAS_NEXT;
4318 }
4319 
4320 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4321 {
4322     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4323     gen_op_movi_cc(s, 3);
4324     return DISAS_NEXT;
4325 }
4326 
4327 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4328 {
4329     /* The instruction is suppressed if not provided. */
4330     return DISAS_NEXT;
4331 }
4332 
4333 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4334 {
4335     gen_helper_ssch(tcg_env, regs[1], o->in2);
4336     set_cc_static(s);
4337     return DISAS_NEXT;
4338 }
4339 
4340 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4341 {
4342     gen_helper_stsch(tcg_env, regs[1], o->in2);
4343     set_cc_static(s);
4344     return DISAS_NEXT;
4345 }
4346 
4347 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4348 {
4349     gen_helper_stcrw(tcg_env, o->in2);
4350     set_cc_static(s);
4351     return DISAS_NEXT;
4352 }
4353 
4354 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4355 {
4356     gen_helper_tpi(cc_op, tcg_env, o->addr1);
4357     set_cc_static(s);
4358     return DISAS_NEXT;
4359 }
4360 
4361 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4362 {
4363     gen_helper_tsch(tcg_env, regs[1], o->in2);
4364     set_cc_static(s);
4365     return DISAS_NEXT;
4366 }
4367 
4368 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4369 {
4370     gen_helper_chsc(tcg_env, o->in2);
4371     set_cc_static(s);
4372     return DISAS_NEXT;
4373 }
4374 
4375 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4376 {
4377     tcg_gen_ld_i64(o->out, tcg_env, offsetof(CPUS390XState, psa));
4378     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4379     return DISAS_NEXT;
4380 }
4381 
4382 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4383 {
4384     uint64_t i2 = get_field(s, i2);
4385     TCGv_i64 t;
4386 
4387     /* It is important to do what the instruction name says: STORE THEN.
4388        If we let the output hook perform the store then if we fault and
4389        restart, we'll have the wrong SYSTEM MASK in place.  */
4390     t = tcg_temp_new_i64();
4391     tcg_gen_shri_i64(t, psw_mask, 56);
4392     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4393 
4394     if (s->fields.op == 0xac) {
4395         tcg_gen_andi_i64(psw_mask, psw_mask,
4396                          (i2 << 56) | 0x00ffffffffffffffull);
4397     } else {
4398         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4399     }
4400 
4401     gen_check_psw_mask(s);
4402 
4403     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4404     s->exit_to_mainloop = true;
4405     return DISAS_TOO_MANY;
4406 }
4407 
4408 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4409 {
4410     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4411 
4412     if (s->base.tb->flags & FLAG_MASK_PER) {
4413         update_psw_addr(s);
4414         gen_helper_per_store_real(tcg_env);
4415     }
4416     return DISAS_NEXT;
4417 }
4418 #endif
4419 
4420 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4421 {
4422     gen_helper_stfle(cc_op, tcg_env, o->in2);
4423     set_cc_static(s);
4424     return DISAS_NEXT;
4425 }
4426 
4427 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4428 {
4429     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4430     return DISAS_NEXT;
4431 }
4432 
4433 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4434 {
4435     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4436     return DISAS_NEXT;
4437 }
4438 
4439 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4440 {
4441     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4442                        MO_TEUL | s->insn->data);
4443     return DISAS_NEXT;
4444 }
4445 
4446 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4447 {
4448     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4449                         MO_TEUQ | s->insn->data);
4450     return DISAS_NEXT;
4451 }
4452 
4453 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4454 {
4455     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4456     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4457 
4458     gen_helper_stam(tcg_env, r1, o->in2, r3);
4459     return DISAS_NEXT;
4460 }
4461 
4462 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4463 {
4464     int m3 = get_field(s, m3);
4465     int pos, base = s->insn->data;
4466     TCGv_i64 tmp = tcg_temp_new_i64();
4467 
4468     pos = base + ctz32(m3) * 8;
4469     switch (m3) {
4470     case 0xf:
4471         /* Effectively a 32-bit store.  */
4472         tcg_gen_shri_i64(tmp, o->in1, pos);
4473         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4474         break;
4475 
4476     case 0xc:
4477     case 0x6:
4478     case 0x3:
4479         /* Effectively a 16-bit store.  */
4480         tcg_gen_shri_i64(tmp, o->in1, pos);
4481         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4482         break;
4483 
4484     case 0x8:
4485     case 0x4:
4486     case 0x2:
4487     case 0x1:
4488         /* Effectively an 8-bit store.  */
4489         tcg_gen_shri_i64(tmp, o->in1, pos);
4490         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4491         break;
4492 
4493     default:
4494         /* This is going to be a sequence of shifts and stores.  */
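             /* e.g. m3 == 0xa stores bytes 0 and 2 of the 32-bit field to
                consecutive ascending addresses.  */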
4495         pos = base + 32 - 8;
4496         while (m3) {
4497             if (m3 & 0x8) {
4498                 tcg_gen_shri_i64(tmp, o->in1, pos);
4499                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4500                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4501             }
4502             m3 = (m3 << 1) & 0xf;
4503             pos -= 8;
4504         }
4505         break;
4506     }
4507     return DISAS_NEXT;
4508 }
4509 
4510 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4511 {
4512     int r1 = get_field(s, r1);
4513     int r3 = get_field(s, r3);
4514     int size = s->insn->data;
4515     TCGv_i64 tsize = tcg_constant_i64(size);
4516 
4517     while (1) {
4518         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4519                             size == 8 ? MO_TEUQ : MO_TEUL);
4520         if (r1 == r3) {
4521             break;
4522         }
4523         tcg_gen_add_i64(o->in2, o->in2, tsize);
4524         r1 = (r1 + 1) & 15;
4525     }
4526 
4527     return DISAS_NEXT;
4528 }
4529 
4530 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4531 {
4532     int r1 = get_field(s, r1);
4533     int r3 = get_field(s, r3);
4534     TCGv_i64 t = tcg_temp_new_i64();
4535     TCGv_i64 t4 = tcg_constant_i64(4);
4536     TCGv_i64 t32 = tcg_constant_i64(32);
4537 
4538     while (1) {
4539         tcg_gen_shl_i64(t, regs[r1], t32);
4540         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4541         if (r1 == r3) {
4542             break;
4543         }
4544         tcg_gen_add_i64(o->in2, o->in2, t4);
4545         r1 = (r1 + 1) & 15;
4546     }
4547     return DISAS_NEXT;
4548 }
4549 
4550 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4551 {
4552     TCGv_i128 t16 = tcg_temp_new_i128();
4553 
4554     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4555     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4556                          MO_TE | MO_128 | MO_ALIGN);
4557     return DISAS_NEXT;
4558 }
4559 
4560 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4561 {
4562     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4563     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4564 
4565     gen_helper_srst(tcg_env, r1, r2);
4566     set_cc_static(s);
4567     return DISAS_NEXT;
4568 }
4569 
4570 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4571 {
4572     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4573     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4574 
4575     gen_helper_srstu(tcg_env, r1, r2);
4576     set_cc_static(s);
4577     return DISAS_NEXT;
4578 }
4579 
4580 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4581 {
4582     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4583     return DISAS_NEXT;
4584 }
4585 
4586 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4587 {
4588     tcg_gen_movi_i64(cc_src, 0);
4589     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4590     return DISAS_NEXT;
4591 }
4592 
4593 /* Compute borrow (0, -1) into cc_src. */
4594 static void compute_borrow(DisasContext *s)
4595 {
4596     switch (s->cc_op) {
4597     case CC_OP_SUBU:
4598         /* The borrow value is already in cc_src (0,-1). */
4599         break;
4600     default:
4601         gen_op_calc_cc(s);
4602         /* fall through */
4603     case CC_OP_STATIC:
4604         /* The carry flag is the msb of CC; compute into cc_src. */
4605         tcg_gen_extu_i32_i64(cc_src, cc_op);
4606         tcg_gen_shri_i64(cc_src, cc_src, 1);
4607         /* fall through */
4608     case CC_OP_ADDU:
4609         /* Convert carry (1,0) to borrow (0,-1). */
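             /* e.g. CC 2 or 3 (no borrow, carry = 1) becomes 0, while
                CC 0 or 1 (borrow, carry = 0) becomes -1.  */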
4610         tcg_gen_subi_i64(cc_src, cc_src, 1);
4611         break;
4612     }
4613 }
4614 
4615 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4616 {
4617     compute_borrow(s);
4618 
4619     /* Borrow is {0, -1}, so add to subtract. */
4620     tcg_gen_add_i64(o->out, o->in1, cc_src);
4621     tcg_gen_sub_i64(o->out, o->out, o->in2);
4622     return DISAS_NEXT;
4623 }
4624 
4625 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4626 {
4627     compute_borrow(s);
4628 
4629     /*
4630      * Borrow is {0, -1}, so add to subtract; replicate the
4631      * borrow input to produce 128-bit -1 for the addition.
4632      */
4633     TCGv_i64 zero = tcg_constant_i64(0);
4634     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4635     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4636 
4637     return DISAS_NEXT;
4638 }
4639 
4640 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4641 {
4642     TCGv_i32 t;
4643 
4644     update_psw_addr(s);
4645     update_cc_op(s);
4646 
4647     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4648     tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_code));
4649 
4650     t = tcg_constant_i32(s->ilen);
4651     tcg_gen_st_i32(t, tcg_env, offsetof(CPUS390XState, int_svc_ilen));
4652 
4653     gen_exception(EXCP_SVC);
4654     return DISAS_NORETURN;
4655 }
4656 
4657 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4658 {
4659     int cc = 0;
4660 
4661     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4662     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4663     gen_op_movi_cc(s, cc);
4664     return DISAS_NEXT;
4665 }
4666 
4667 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4668 {
4669     gen_helper_tceb(cc_op, tcg_env, o->in1, o->in2);
4670     set_cc_static(s);
4671     return DISAS_NEXT;
4672 }
4673 
4674 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4675 {
4676     gen_helper_tcdb(cc_op, tcg_env, o->in1, o->in2);
4677     set_cc_static(s);
4678     return DISAS_NEXT;
4679 }
4680 
4681 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4682 {
4683     gen_helper_tcxb(cc_op, tcg_env, o->in1_128, o->in2);
4684     set_cc_static(s);
4685     return DISAS_NEXT;
4686 }
4687 
4688 #ifndef CONFIG_USER_ONLY
4689 
4690 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4691 {
4692     gen_helper_testblock(cc_op, tcg_env, o->in2);
4693     set_cc_static(s);
4694     return DISAS_NEXT;
4695 }
4696 
4697 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4698 {
4699     gen_helper_tprot(cc_op, tcg_env, o->addr1, o->in2);
4700     set_cc_static(s);
4701     return DISAS_NEXT;
4702 }
4703 
4704 #endif
4705 
4706 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4707 {
4708     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4709 
4710     gen_helper_tp(cc_op, tcg_env, o->addr1, l1);
4711     set_cc_static(s);
4712     return DISAS_NEXT;
4713 }
4714 
4715 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4716 {
4717     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4718 
4719     gen_helper_tr(tcg_env, l, o->addr1, o->in2);
4720     set_cc_static(s);
4721     return DISAS_NEXT;
4722 }
4723 
4724 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4725 {
4726     TCGv_i128 pair = tcg_temp_new_i128();
4727 
4728     gen_helper_tre(pair, tcg_env, o->out, o->out2, o->in2);
4729     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4730     set_cc_static(s);
4731     return DISAS_NEXT;
4732 }
4733 
4734 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4735 {
4736     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4737 
4738     gen_helper_trt(cc_op, tcg_env, l, o->addr1, o->in2);
4739     set_cc_static(s);
4740     return DISAS_NEXT;
4741 }
4742 
4743 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4744 {
4745     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4746 
4747     gen_helper_trtr(cc_op, tcg_env, l, o->addr1, o->in2);
4748     set_cc_static(s);
4749     return DISAS_NEXT;
4750 }
4751 
4752 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4753 {
4754     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4755     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4756     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4757     TCGv_i32 tst = tcg_temp_new_i32();
4758     int m3 = get_field(s, m3);
4759 
4760     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4761         m3 = 0;
4762     }
4763     if (m3 & 1) {
4764         tcg_gen_movi_i32(tst, -1);
4765     } else {
4766         tcg_gen_extrl_i64_i32(tst, regs[0]);
4767         if (s->insn->opc & 3) {
4768             tcg_gen_ext8u_i32(tst, tst);
4769         } else {
4770             tcg_gen_ext16u_i32(tst, tst);
4771         }
4772     }
4773     gen_helper_trXX(cc_op, tcg_env, r1, r2, tst, sizes);
4774 
4775     set_cc_static(s);
4776     return DISAS_NEXT;
4777 }
4778 
4779 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4780 {
4781     TCGv_i32 ff = tcg_constant_i32(0xff);
4782     TCGv_i32 t1 = tcg_temp_new_i32();
4783 
4784     tcg_gen_atomic_xchg_i32(t1, o->in2, ff, get_mem_index(s), MO_UB);
4785     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4786     set_cc_static(s);
4787     return DISAS_NEXT;
4788 }
4789 
4790 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4791 {
4792     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4793 
4794     gen_helper_unpk(tcg_env, l, o->addr1, o->in2);
4795     return DISAS_NEXT;
4796 }
4797 
4798 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4799 {
4800     int l1 = get_field(s, l1) + 1;
4801     TCGv_i32 l;
4802 
4803     /* The length must not exceed 32 bytes.  */
4804     if (l1 > 32) {
4805         gen_program_exception(s, PGM_SPECIFICATION);
4806         return DISAS_NORETURN;
4807     }
4808     l = tcg_constant_i32(l1);
4809     gen_helper_unpka(cc_op, tcg_env, o->addr1, l, o->in2);
4810     set_cc_static(s);
4811     return DISAS_NEXT;
4812 }
4813 
4814 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4815 {
4816     int l1 = get_field(s, l1) + 1;
4817     TCGv_i32 l;
4818 
4819     /* The length must be even and must not exceed 64 bytes.  */
4820     if ((l1 & 1) || (l1 > 64)) {
4821         gen_program_exception(s, PGM_SPECIFICATION);
4822         return DISAS_NORETURN;
4823     }
4824     l = tcg_constant_i32(l1);
4825     gen_helper_unpku(cc_op, tcg_env, o->addr1, l, o->in2);
4826     set_cc_static(s);
4827     return DISAS_NEXT;
4828 }
4829 
4830 
4831 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4832 {
4833     int d1 = get_field(s, d1);
4834     int d2 = get_field(s, d2);
4835     int b1 = get_field(s, b1);
4836     int b2 = get_field(s, b2);
4837     int l = get_field(s, l1);
4838     TCGv_i32 t32;
4839 
4840     o->addr1 = get_address(s, 0, b1, d1);
4841 
4842     /* If the addresses are identical, this is a store/memset of zero.  */
4843     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4844         o->in2 = tcg_constant_i64(0);
4845 
4846         l++;
4847         while (l >= 8) {
4848             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4849             l -= 8;
4850             if (l > 0) {
4851                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4852             }
4853         }
4854         if (l >= 4) {
4855             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4856             l -= 4;
4857             if (l > 0) {
4858                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4859             }
4860         }
4861         if (l >= 2) {
4862             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4863             l -= 2;
4864             if (l > 0) {
4865                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4866             }
4867         }
4868         if (l) {
4869             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4870         }
4871         gen_op_movi_cc(s, 0);
4872         return DISAS_NEXT;
4873     }
4874 
4875     /* But in general we'll defer to a helper.  */
4876     o->in2 = get_address(s, 0, b2, d2);
4877     t32 = tcg_constant_i32(l);
4878     gen_helper_xc(cc_op, tcg_env, t32, o->addr1, o->in2);
4879     set_cc_static(s);
4880     return DISAS_NEXT;
4881 }
4882 
4883 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4884 {
4885     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4886     return DISAS_NEXT;
4887 }
4888 
4889 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4890 {
4891     int shift = s->insn->data & 0xff;
4892     int size = s->insn->data >> 8;
4893     uint64_t mask = ((1ull << size) - 1) << shift;
4894     TCGv_i64 t = tcg_temp_new_i64();
4895 
4896     tcg_gen_shli_i64(t, o->in2, shift);
4897     tcg_gen_xor_i64(o->out, o->in1, t);
4898 
4899     /* Produce the CC from only the bits manipulated.  */
4900     tcg_gen_andi_i64(cc_dst, o->out, mask);
4901     set_cc_nz_u64(s, cc_dst);
4902     return DISAS_NEXT;
4903 }
4904 
4905 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4906 {
4907     o->in1 = tcg_temp_new_i64();
4908 
4909     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4910         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4911     } else {
4912         /* Perform the atomic operation in memory. */
4913         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4914                                      s->insn->data);
4915     }
4916 
4917     /* Recompute the result also for the atomic case: needed to set the CC. */
4918     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4919 
4920     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4921         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4922     }
4923     return DISAS_NEXT;
4924 }
4925 
4926 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4927 {
4928     o->out = tcg_constant_i64(0);
4929     return DISAS_NEXT;
4930 }
4931 
4932 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4933 {
4934     o->out = tcg_constant_i64(0);
4935     o->out2 = o->out;
4936     return DISAS_NEXT;
4937 }
4938 
4939 #ifndef CONFIG_USER_ONLY
4940 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4941 {
4942     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4943 
4944     gen_helper_clp(tcg_env, r2);
4945     set_cc_static(s);
4946     return DISAS_NEXT;
4947 }
4948 
4949 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4950 {
4951     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4952     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4953 
4954     gen_helper_pcilg(tcg_env, r1, r2);
4955     set_cc_static(s);
4956     return DISAS_NEXT;
4957 }
4958 
4959 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4960 {
4961     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4962     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4963 
4964     gen_helper_pcistg(tcg_env, r1, r2);
4965     set_cc_static(s);
4966     return DISAS_NEXT;
4967 }
4968 
4969 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4970 {
4971     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4972     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4973 
4974     gen_helper_stpcifc(tcg_env, r1, o->addr1, ar);
4975     set_cc_static(s);
4976     return DISAS_NEXT;
4977 }
4978 
4979 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4980 {
4981     gen_helper_sic(tcg_env, o->in1, o->in2);
4982     return DISAS_NEXT;
4983 }
4984 
4985 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4986 {
4987     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4988     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4989 
4990     gen_helper_rpcit(tcg_env, r1, r2);
4991     set_cc_static(s);
4992     return DISAS_NEXT;
4993 }
4994 
4995 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4996 {
4997     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4998     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4999     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5000 
5001     gen_helper_pcistb(tcg_env, r1, r3, o->addr1, ar);
5002     set_cc_static(s);
5003     return DISAS_NEXT;
5004 }
5005 
5006 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5007 {
5008     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
5009     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
5010 
5011     gen_helper_mpcifc(tcg_env, r1, o->addr1, ar);
5012     set_cc_static(s);
5013     return DISAS_NEXT;
5014 }
5015 #endif
5016 
5017 #include "translate_vx.c.inc"
5018 
5019 /* ====================================================================== */
5020 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5021    the original inputs), update the various cc data structures in order to
5022    be able to compute the new condition code.  */
5023 
5024 static void cout_abs32(DisasContext *s, DisasOps *o)
5025 {
5026     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5027 }
5028 
5029 static void cout_abs64(DisasContext *s, DisasOps *o)
5030 {
5031     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5032 }
5033 
5034 static void cout_adds32(DisasContext *s, DisasOps *o)
5035 {
5036     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5037 }
5038 
5039 static void cout_adds64(DisasContext *s, DisasOps *o)
5040 {
5041     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5042 }
5043 
5044 static void cout_addu32(DisasContext *s, DisasOps *o)
5045 {
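         /* Adding two zero-extended 32-bit values leaves any carry in bit 32
            of the 64-bit sum: e.g. 0xffffffff + 1 = 0x100000000 yields
            cc_src = 1 (carry) and cc_dst = 0 (zero result), i.e. CC 2.  */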
5046     tcg_gen_shri_i64(cc_src, o->out, 32);
5047     tcg_gen_ext32u_i64(cc_dst, o->out);
5048     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5049 }
5050 
5051 static void cout_addu64(DisasContext *s, DisasOps *o)
5052 {
5053     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5054 }
5055 
5056 static void cout_cmps32(DisasContext *s, DisasOps *o)
5057 {
5058     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5059 }
5060 
5061 static void cout_cmps64(DisasContext *s, DisasOps *o)
5062 {
5063     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5064 }
5065 
5066 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5067 {
5068     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5069 }
5070 
5071 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5072 {
5073     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5074 }
5075 
5076 static void cout_f32(DisasContext *s, DisasOps *o)
5077 {
5078     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5079 }
5080 
5081 static void cout_f64(DisasContext *s, DisasOps *o)
5082 {
5083     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5084 }
5085 
5086 static void cout_f128(DisasContext *s, DisasOps *o)
5087 {
5088     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5089 }
5090 
5091 static void cout_nabs32(DisasContext *s, DisasOps *o)
5092 {
5093     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5094 }
5095 
5096 static void cout_nabs64(DisasContext *s, DisasOps *o)
5097 {
5098     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5099 }
5100 
5101 static void cout_neg32(DisasContext *s, DisasOps *o)
5102 {
5103     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5104 }
5105 
5106 static void cout_neg64(DisasContext *s, DisasOps *o)
5107 {
5108     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5109 }
5110 
5111 static void cout_nz32(DisasContext *s, DisasOps *o)
5112 {
5113     tcg_gen_ext32u_i64(cc_dst, o->out);
5114     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5115 }
5116 
5117 static void cout_nz64(DisasContext *s, DisasOps *o)
5118 {
5119     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5120 }
5121 
5122 static void cout_s32(DisasContext *s, DisasOps *o)
5123 {
5124     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5125 }
5126 
5127 static void cout_s64(DisasContext *s, DisasOps *o)
5128 {
5129     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5130 }
5131 
5132 static void cout_subs32(DisasContext *s, DisasOps *o)
5133 {
5134     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5135 }
5136 
5137 static void cout_subs64(DisasContext *s, DisasOps *o)
5138 {
5139     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5140 }
5141 
5142 static void cout_subu32(DisasContext *s, DisasOps *o)
5143 {
5144     tcg_gen_sari_i64(cc_src, o->out, 32);
5145     tcg_gen_ext32u_i64(cc_dst, o->out);
5146     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5147 }
5148 
5149 static void cout_subu64(DisasContext *s, DisasOps *o)
5150 {
5151     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5152 }
5153 
5154 static void cout_tm32(DisasContext *s, DisasOps *o)
5155 {
5156     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5157 }
5158 
5159 static void cout_tm64(DisasContext *s, DisasOps *o)
5160 {
5161     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5162 }
5163 
5164 static void cout_muls32(DisasContext *s, DisasOps *o)
5165 {
5166     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5167 }
5168 
5169 static void cout_muls64(DisasContext *s, DisasOps *o)
5170 {
5171     /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5172     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5173 }
5174 
5175 /* ====================================================================== */
5176 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5177    with the TCG register to which we will write.  Used in combination with
5178    the "wout" generators, in some cases we need a new temporary, and in
5179    some cases we can write to a TCG global.  */
5180 
5181 static void prep_new(DisasContext *s, DisasOps *o)
5182 {
5183     o->out = tcg_temp_new_i64();
5184 }
5185 #define SPEC_prep_new 0
5186 
5187 static void prep_new_P(DisasContext *s, DisasOps *o)
5188 {
5189     o->out = tcg_temp_new_i64();
5190     o->out2 = tcg_temp_new_i64();
5191 }
5192 #define SPEC_prep_new_P 0
5193 
5194 static void prep_new_x(DisasContext *s, DisasOps *o)
5195 {
5196     o->out_128 = tcg_temp_new_i128();
5197 }
5198 #define SPEC_prep_new_x 0
5199 
5200 static void prep_r1(DisasContext *s, DisasOps *o)
5201 {
5202     o->out = regs[get_field(s, r1)];
5203 }
5204 #define SPEC_prep_r1 0
5205 
5206 static void prep_r1_P(DisasContext *s, DisasOps *o)
5207 {
5208     int r1 = get_field(s, r1);
5209     o->out = regs[r1];
5210     o->out2 = regs[r1 + 1];
5211 }
5212 #define SPEC_prep_r1_P SPEC_r1_even
5213 
5214 /* ====================================================================== */
5215 /* The "Write OUTput" generators.  These generally perform some non-trivial
5216    copy of data to TCG globals, or to main memory.  The trivial cases are
5217    generally handled by having a "prep" generator install the TCG global
5218    as the destination of the operation.  */
5219 
5220 static void wout_r1(DisasContext *s, DisasOps *o)
5221 {
5222     store_reg(get_field(s, r1), o->out);
5223 }
5224 #define SPEC_wout_r1 0
5225 
5226 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5227 {
5228     store_reg(get_field(s, r1), o->out2);
5229 }
5230 #define SPEC_wout_out2_r1 0
5231 
5232 static void wout_r1_8(DisasContext *s, DisasOps *o)
5233 {
5234     int r1 = get_field(s, r1);
5235     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5236 }
5237 #define SPEC_wout_r1_8 0
5238 
5239 static void wout_r1_16(DisasContext *s, DisasOps *o)
5240 {
5241     int r1 = get_field(s, r1);
5242     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5243 }
5244 #define SPEC_wout_r1_16 0
5245 
5246 static void wout_r1_32(DisasContext *s, DisasOps *o)
5247 {
5248     store_reg32_i64(get_field(s, r1), o->out);
5249 }
5250 #define SPEC_wout_r1_32 0
5251 
5252 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5253 {
5254     store_reg32h_i64(get_field(s, r1), o->out);
5255 }
5256 #define SPEC_wout_r1_32h 0
5257 
5258 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5259 {
5260     int r1 = get_field(s, r1);
5261     store_reg32_i64(r1, o->out);
5262     store_reg32_i64(r1 + 1, o->out2);
5263 }
5264 #define SPEC_wout_r1_P32 SPEC_r1_even
5265 
5266 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5267 {
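         /* Split the 64-bit result across the even/odd pair: the low word
            goes to r1 + 1, the high word to r1.  */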
5268     int r1 = get_field(s, r1);
5269     TCGv_i64 t = tcg_temp_new_i64();
5270     store_reg32_i64(r1 + 1, o->out);
5271     tcg_gen_shri_i64(t, o->out, 32);
5272     store_reg32_i64(r1, t);
5273 }
5274 #define SPEC_wout_r1_D32 SPEC_r1_even
5275 
5276 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5277 {
5278     int r1 = get_field(s, r1);
5279     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5280 }
5281 #define SPEC_wout_r1_D64 SPEC_r1_even
5282 
5283 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5284 {
5285     int r3 = get_field(s, r3);
5286     store_reg32_i64(r3, o->out);
5287     store_reg32_i64(r3 + 1, o->out2);
5288 }
5289 #define SPEC_wout_r3_P32 SPEC_r3_even
5290 
5291 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5292 {
5293     int r3 = get_field(s, r3);
5294     store_reg(r3, o->out);
5295     store_reg(r3 + 1, o->out2);
5296 }
5297 #define SPEC_wout_r3_P64 SPEC_r3_even
5298 
5299 static void wout_e1(DisasContext *s, DisasOps *o)
5300 {
5301     store_freg32_i64(get_field(s, r1), o->out);
5302 }
5303 #define SPEC_wout_e1 0
5304 
5305 static void wout_f1(DisasContext *s, DisasOps *o)
5306 {
5307     store_freg(get_field(s, r1), o->out);
5308 }
5309 #define SPEC_wout_f1 0
5310 
5311 static void wout_x1(DisasContext *s, DisasOps *o)
5312 {
5313     int f1 = get_field(s, r1);
5314 
5315     /* Split out_128 into out+out2 for cout_f128. */
5316     tcg_debug_assert(o->out == NULL);
5317     o->out = tcg_temp_new_i64();
5318     o->out2 = tcg_temp_new_i64();
5319 
5320     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5321     store_freg(f1, o->out);
5322     store_freg(f1 + 2, o->out2);
5323 }
5324 #define SPEC_wout_x1 SPEC_r1_f128
5325 
5326 static void wout_x1_P(DisasContext *s, DisasOps *o)
5327 {
5328     int f1 = get_field(s, r1);
5329     store_freg(f1, o->out);
5330     store_freg(f1 + 2, o->out2);
5331 }
5332 #define SPEC_wout_x1_P SPEC_r1_f128
5333 
5334 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5335 {
5336     if (get_field(s, r1) != get_field(s, r2)) {
5337         store_reg32_i64(get_field(s, r1), o->out);
5338     }
5339 }
5340 #define SPEC_wout_cond_r1r2_32 0
5341 
5342 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5343 {
5344     if (get_field(s, r1) != get_field(s, r2)) {
5345         store_freg32_i64(get_field(s, r1), o->out);
5346     }
5347 }
5348 #define SPEC_wout_cond_e1e2 0
5349 
5350 static void wout_m1_8(DisasContext *s, DisasOps *o)
5351 {
5352     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5353 }
5354 #define SPEC_wout_m1_8 0
5355 
5356 static void wout_m1_16(DisasContext *s, DisasOps *o)
5357 {
5358     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5359 }
5360 #define SPEC_wout_m1_16 0
5361 
5362 #ifndef CONFIG_USER_ONLY
5363 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5364 {
5365     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5366 }
5367 #define SPEC_wout_m1_16a 0
5368 #endif
5369 
5370 static void wout_m1_32(DisasContext *s, DisasOps *o)
5371 {
5372     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5373 }
5374 #define SPEC_wout_m1_32 0
5375 
5376 #ifndef CONFIG_USER_ONLY
5377 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5378 {
5379     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5380 }
5381 #define SPEC_wout_m1_32a 0
5382 #endif
5383 
5384 static void wout_m1_64(DisasContext *s, DisasOps *o)
5385 {
5386     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5387 }
5388 #define SPEC_wout_m1_64 0
5389 
5390 #ifndef CONFIG_USER_ONLY
5391 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5392 {
5393     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5394 }
5395 #define SPEC_wout_m1_64a 0
5396 #endif
5397 
5398 static void wout_m2_32(DisasContext *s, DisasOps *o)
5399 {
5400     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5401 }
5402 #define SPEC_wout_m2_32 0
5403 
5404 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5405 {
5406     store_reg(get_field(s, r1), o->in2);
5407 }
5408 #define SPEC_wout_in2_r1 0
5409 
5410 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5411 {
5412     store_reg32_i64(get_field(s, r1), o->in2);
5413 }
5414 #define SPEC_wout_in2_r1_32 0
5415 
5416 /* ====================================================================== */
5417 /* The "INput 1" generators.  These load the first operand to an insn.  */
5418 
5419 static void in1_r1(DisasContext *s, DisasOps *o)
5420 {
5421     o->in1 = load_reg(get_field(s, r1));
5422 }
5423 #define SPEC_in1_r1 0
5424 
5425 static void in1_r1_o(DisasContext *s, DisasOps *o)
5426 {
5427     o->in1 = regs[get_field(s, r1)];
5428 }
5429 #define SPEC_in1_r1_o 0
5430 
5431 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5432 {
5433     o->in1 = tcg_temp_new_i64();
5434     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5435 }
5436 #define SPEC_in1_r1_32s 0
5437 
5438 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5439 {
5440     o->in1 = tcg_temp_new_i64();
5441     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5442 }
5443 #define SPEC_in1_r1_32u 0
5444 
5445 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5446 {
5447     o->in1 = tcg_temp_new_i64();
5448     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5449 }
5450 #define SPEC_in1_r1_sr32 0
5451 
5452 static void in1_r1p1(DisasContext *s, DisasOps *o)
5453 {
5454     o->in1 = load_reg(get_field(s, r1) + 1);
5455 }
5456 #define SPEC_in1_r1p1 SPEC_r1_even
5457 
5458 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5459 {
5460     o->in1 = regs[get_field(s, r1) + 1];
5461 }
5462 #define SPEC_in1_r1p1_o SPEC_r1_even
5463 
5464 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5465 {
5466     o->in1 = tcg_temp_new_i64();
5467     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5468 }
5469 #define SPEC_in1_r1p1_32s SPEC_r1_even
5470 
5471 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5472 {
5473     o->in1 = tcg_temp_new_i64();
5474     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5475 }
5476 #define SPEC_in1_r1p1_32u SPEC_r1_even
5477 
5478 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5479 {
5480     int r1 = get_field(s, r1);
5481     o->in1 = tcg_temp_new_i64();
5482     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5483 }
5484 #define SPEC_in1_r1_D32 SPEC_r1_even
5485 
5486 static void in1_r2(DisasContext *s, DisasOps *o)
5487 {
5488     o->in1 = load_reg(get_field(s, r2));
5489 }
5490 #define SPEC_in1_r2 0
5491 
5492 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5493 {
5494     o->in1 = tcg_temp_new_i64();
5495     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5496 }
5497 #define SPEC_in1_r2_sr32 0
5498 
5499 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5500 {
5501     o->in1 = tcg_temp_new_i64();
5502     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5503 }
5504 #define SPEC_in1_r2_32u 0
5505 
5506 static void in1_r3(DisasContext *s, DisasOps *o)
5507 {
5508     o->in1 = load_reg(get_field(s, r3));
5509 }
5510 #define SPEC_in1_r3 0
5511 
5512 static void in1_r3_o(DisasContext *s, DisasOps *o)
5513 {
5514     o->in1 = regs[get_field(s, r3)];
5515 }
5516 #define SPEC_in1_r3_o 0
5517 
5518 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5519 {
5520     o->in1 = tcg_temp_new_i64();
5521     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5522 }
5523 #define SPEC_in1_r3_32s 0
5524 
5525 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5526 {
5527     o->in1 = tcg_temp_new_i64();
5528     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5529 }
5530 #define SPEC_in1_r3_32u 0
5531 
5532 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5533 {
5534     int r3 = get_field(s, r3);
5535     o->in1 = tcg_temp_new_i64();
5536     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5537 }
5538 #define SPEC_in1_r3_D32 SPEC_r3_even
5539 
5540 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5541 {
5542     o->in1 = tcg_temp_new_i64();
5543     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5544 }
5545 #define SPEC_in1_r3_sr32 0
5546 
5547 static void in1_e1(DisasContext *s, DisasOps *o)
5548 {
5549     o->in1 = load_freg32_i64(get_field(s, r1));
5550 }
5551 #define SPEC_in1_e1 0
5552 
5553 static void in1_f1(DisasContext *s, DisasOps *o)
5554 {
5555     o->in1 = load_freg(get_field(s, r1));
5556 }
5557 #define SPEC_in1_f1 0
5558 
5559 static void in1_x1(DisasContext *s, DisasOps *o)
5560 {
5561     o->in1_128 = load_freg_128(get_field(s, r1));
5562 }
5563 #define SPEC_in1_x1 SPEC_r1_f128
5564 
5565 /* Load the high double word of an extended (128-bit) format FP number */
5566 static void in1_x2h(DisasContext *s, DisasOps *o)
5567 {
5568     o->in1 = load_freg(get_field(s, r2));
5569 }
5570 #define SPEC_in1_x2h SPEC_r2_f128
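/*
 * For illustration: an extended (128-bit) value held in FPR pair 4
 * keeps its high doubleword in f4 and its low doubleword in f6; the
 * SPEC_r?_f128 flags restrict pair starts to 0,1,4,5,8,9,12,13
 * (see is_fp_pair() below).
 */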
5571 
5572 static void in1_f3(DisasContext *s, DisasOps *o)
5573 {
5574     o->in1 = load_freg(get_field(s, r3));
5575 }
5576 #define SPEC_in1_f3 0
5577 
5578 static void in1_la1(DisasContext *s, DisasOps *o)
5579 {
5580     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5581 }
5582 #define SPEC_in1_la1 0
5583 
5584 static void in1_la2(DisasContext *s, DisasOps *o)
5585 {
5586     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5587     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5588 }
5589 #define SPEC_in1_la2 0
5590 
5591 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5592 {
5593     in1_la1(s, o);
5594     o->in1 = tcg_temp_new_i64();
5595     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5596 }
5597 #define SPEC_in1_m1_8u 0
5598 
5599 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5600 {
5601     in1_la1(s, o);
5602     o->in1 = tcg_temp_new_i64();
5603     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5604 }
5605 #define SPEC_in1_m1_16s 0
5606 
5607 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5608 {
5609     in1_la1(s, o);
5610     o->in1 = tcg_temp_new_i64();
5611     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5612 }
5613 #define SPEC_in1_m1_16u 0
5614 
5615 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5616 {
5617     in1_la1(s, o);
5618     o->in1 = tcg_temp_new_i64();
5619     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5620 }
5621 #define SPEC_in1_m1_32s 0
5622 
5623 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5624 {
5625     in1_la1(s, o);
5626     o->in1 = tcg_temp_new_i64();
5627     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5628 }
5629 #define SPEC_in1_m1_32u 0
5630 
5631 static void in1_m1_64(DisasContext *s, DisasOps *o)
5632 {
5633     in1_la1(s, o);
5634     o->in1 = tcg_temp_new_i64();
5635     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5636 }
5637 #define SPEC_in1_m1_64 0
5638 
5639 /* ====================================================================== */
5640 /* The "INput 2" generators.  These load the second operand to an insn.  */
5641 
5642 static void in2_r1_o(DisasContext *s, DisasOps *o)
5643 {
5644     o->in2 = regs[get_field(s, r1)];
5645 }
5646 #define SPEC_in2_r1_o 0
5647 
5648 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5649 {
5650     o->in2 = tcg_temp_new_i64();
5651     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5652 }
5653 #define SPEC_in2_r1_16u 0
5654 
5655 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5656 {
5657     o->in2 = tcg_temp_new_i64();
5658     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5659 }
5660 #define SPEC_in2_r1_32u 0
5661 
5662 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5663 {
5664     int r1 = get_field(s, r1);
5665     o->in2 = tcg_temp_new_i64();
5666     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5667 }
5668 #define SPEC_in2_r1_D32 SPEC_r1_even
5669 
5670 static void in2_r2(DisasContext *s, DisasOps *o)
5671 {
5672     o->in2 = load_reg(get_field(s, r2));
5673 }
5674 #define SPEC_in2_r2 0
5675 
5676 static void in2_r2_o(DisasContext *s, DisasOps *o)
5677 {
5678     o->in2 = regs[get_field(s, r2)];
5679 }
5680 #define SPEC_in2_r2_o 0
5681 
5682 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5683 {
5684     int r2 = get_field(s, r2);
5685     if (r2 != 0) {
5686         o->in2 = load_reg(r2);
5687     }
5688 }
5689 #define SPEC_in2_r2_nz 0
5690 
5691 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5692 {
5693     o->in2 = tcg_temp_new_i64();
5694     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5695 }
5696 #define SPEC_in2_r2_8s 0
5697 
5698 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5699 {
5700     o->in2 = tcg_temp_new_i64();
5701     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5702 }
5703 #define SPEC_in2_r2_8u 0
5704 
5705 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5706 {
5707     o->in2 = tcg_temp_new_i64();
5708     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5709 }
5710 #define SPEC_in2_r2_16s 0
5711 
5712 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5713 {
5714     o->in2 = tcg_temp_new_i64();
5715     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5716 }
5717 #define SPEC_in2_r2_16u 0
5718 
5719 static void in2_r3(DisasContext *s, DisasOps *o)
5720 {
5721     o->in2 = load_reg(get_field(s, r3));
5722 }
5723 #define SPEC_in2_r3 0
5724 
5725 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5726 {
5727     int r3 = get_field(s, r3);
5728     o->in2_128 = tcg_temp_new_i128();
5729     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5730 }
5731 #define SPEC_in2_r3_D64 SPEC_r3_even
5732 
5733 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5734 {
5735     o->in2 = tcg_temp_new_i64();
5736     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5737 }
5738 #define SPEC_in2_r3_sr32 0
5739 
5740 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5741 {
5742     o->in2 = tcg_temp_new_i64();
5743     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5744 }
5745 #define SPEC_in2_r3_32u 0
5746 
5747 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5748 {
5749     o->in2 = tcg_temp_new_i64();
5750     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5751 }
5752 #define SPEC_in2_r2_32s 0
5753 
5754 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5755 {
5756     o->in2 = tcg_temp_new_i64();
5757     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5758 }
5759 #define SPEC_in2_r2_32u 0
5760 
5761 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5762 {
5763     o->in2 = tcg_temp_new_i64();
5764     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5765 }
5766 #define SPEC_in2_r2_sr32 0
5767 
5768 static void in2_e2(DisasContext *s, DisasOps *o)
5769 {
5770     o->in2 = load_freg32_i64(get_field(s, r2));
5771 }
5772 #define SPEC_in2_e2 0
5773 
5774 static void in2_f2(DisasContext *s, DisasOps *o)
5775 {
5776     o->in2 = load_freg(get_field(s, r2));
5777 }
5778 #define SPEC_in2_f2 0
5779 
5780 static void in2_x2(DisasContext *s, DisasOps *o)
5781 {
5782     o->in2_128 = load_freg_128(get_field(s, r2));
5783 }
5784 #define SPEC_in2_x2 SPEC_r2_f128
5785 
5786 /* Load the low double word of an extended (128-bit) format FP number */
5787 static void in2_x2l(DisasContext *s, DisasOps *o)
5788 {
5789     o->in2 = load_freg(get_field(s, r2) + 2);
5790 }
5791 #define SPEC_in2_x2l SPEC_r2_f128
5792 
5793 static void in2_ra2(DisasContext *s, DisasOps *o)
5794 {
5795     int r2 = get_field(s, r2);
5796 
5797     /* Note: *don't* treat !r2 as 0, use the reg value. */
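    /*
     * Base/index address fields treat register 0 as zero (see
     * get_address()), whereas here the contents of r2 are used even
     * for r2 == 0; only the addressing-mode wrap is applied.
     */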
5798     o->in2 = tcg_temp_new_i64();
5799     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5800 }
5801 #define SPEC_in2_ra2 0
5802 
5803 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5804 {
5805     in2_ra2(s, o);
5806 }
5807 #define SPEC_in2_ra2_E SPEC_r2_even
5808 
5809 static void in2_a2(DisasContext *s, DisasOps *o)
5810 {
5811     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5812     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5813 }
5814 #define SPEC_in2_a2 0
5815 
5816 static TCGv gen_ri2(DisasContext *s)
5817 {
5818     TCGv ri2 = NULL;
5819     bool is_imm;
5820     int imm;
5821 
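    /*
     * The relative-immediate target counts halfwords from the current
     * insn address, hence the "* 2" when the destination is an
     * immediate; otherwise disas_jdest yields a register value in RI2.
     */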
5822     disas_jdest(s, i2, is_imm, imm, ri2);
5823     if (is_imm) {
5824         ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
5825     }
5826 
5827     return ri2;
5828 }
5829 
5830 static void in2_ri2(DisasContext *s, DisasOps *o)
5831 {
5832     o->in2 = gen_ri2(s);
5833 }
5834 #define SPEC_in2_ri2 0
5835 
5836 static void in2_sh(DisasContext *s, DisasOps *o)
5837 {
5838     int b2 = get_field(s, b2);
5839     int d2 = get_field(s, d2);
5840 
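    /*
     * The shift count is the rightmost 6 bits of the second-operand
     * address, so with no base register it reduces to d2 & 63.
     */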
5841     if (b2 == 0) {
5842         o->in2 = tcg_constant_i64(d2 & 0x3f);
5843     } else {
5844         o->in2 = get_address(s, 0, b2, d2);
5845         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5846     }
5847 }
5848 #define SPEC_in2_sh 0
5849 
5850 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5851 {
5852     in2_a2(s, o);
5853     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5854 }
5855 #define SPEC_in2_m2_8u 0
5856 
5857 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5858 {
5859     in2_a2(s, o);
5860     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5861 }
5862 #define SPEC_in2_m2_16s 0
5863 
5864 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5865 {
5866     in2_a2(s, o);
5867     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5868 }
5869 #define SPEC_in2_m2_16u 0
5870 
5871 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5872 {
5873     in2_a2(s, o);
5874     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5875 }
5876 #define SPEC_in2_m2_32s 0
5877 
5878 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5879 {
5880     in2_a2(s, o);
5881     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5882 }
5883 #define SPEC_in2_m2_32u 0
5884 
5885 #ifndef CONFIG_USER_ONLY
5886 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5887 {
5888     in2_a2(s, o);
5889     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5890 }
5891 #define SPEC_in2_m2_32ua 0
5892 #endif
5893 
5894 static void in2_m2_64(DisasContext *s, DisasOps *o)
5895 {
5896     in2_a2(s, o);
5897     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5898 }
5899 #define SPEC_in2_m2_64 0
5900 
5901 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5902 {
5903     in2_a2(s, o);
5904     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5905     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5906 }
5907 #define SPEC_in2_m2_64w 0
5908 
5909 #ifndef CONFIG_USER_ONLY
5910 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5911 {
5912     in2_a2(s, o);
5913     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5914 }
5915 #define SPEC_in2_m2_64a 0
5916 #endif
5917 
5918 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5919 {
5920     o->in2 = tcg_temp_new_i64();
5921     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5922 }
5923 #define SPEC_in2_mri2_16s 0
5924 
5925 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5926 {
5927     o->in2 = tcg_temp_new_i64();
5928     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5929 }
5930 #define SPEC_in2_mri2_16u 0
5931 
5932 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5933 {
5934     o->in2 = tcg_temp_new_i64();
5935     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5936                        MO_TESL | MO_ALIGN);
5937 }
5938 #define SPEC_in2_mri2_32s 0
5939 
5940 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5941 {
5942     o->in2 = tcg_temp_new_i64();
5943     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5944                        MO_TEUL | MO_ALIGN);
5945 }
5946 #define SPEC_in2_mri2_32u 0
5947 
5948 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5949 {
5950     o->in2 = tcg_temp_new_i64();
5951     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5952                         MO_TEUQ | MO_ALIGN);
5953 }
5954 #define SPEC_in2_mri2_64 0
5955 
5956 static void in2_i2(DisasContext *s, DisasOps *o)
5957 {
5958     o->in2 = tcg_constant_i64(get_field(s, i2));
5959 }
5960 #define SPEC_in2_i2 0
5961 
5962 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5963 {
5964     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5965 }
5966 #define SPEC_in2_i2_8u 0
5967 
5968 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5969 {
5970     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5971 }
5972 #define SPEC_in2_i2_16u 0
5973 
5974 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5975 {
5976     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5977 }
5978 #define SPEC_in2_i2_32u 0
5979 
5980 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5981 {
5982     uint64_t i2 = (uint16_t)get_field(s, i2);
5983     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5984 }
5985 #define SPEC_in2_i2_16u_shl 0
5986 
5987 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5988 {
5989     uint64_t i2 = (uint32_t)get_field(s, i2);
5990     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5991 }
5992 #define SPEC_in2_i2_32u_shl 0
5993 
5994 #ifndef CONFIG_USER_ONLY
5995 static void in2_insn(DisasContext *s, DisasOps *o)
5996 {
5997     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5998 }
5999 #define SPEC_in2_insn 0
6000 #endif
6001 
6002 /* ====================================================================== */
6003 
6004 /* Find opc within the table of insns.  This is formulated as a switch
6005    statement so that (1) we get compile-time notice of cut-paste errors
6006    for duplicated opcodes, and (2) the compiler generates the binary
6007    search tree, rather than us having to post-process the table.  */
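/*
 * For illustration, a table entry such as
 *   C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
 * expands three times below: to "insn_AR," in DisasInsnEnum, to a
 * DisasInsn initializer in insn_info[], and to
 *   case 0x1a00: return &insn_info[insn_AR];
 * in lookup_opc().  (Entry shown for illustration; see insn-data.h.inc
 * for the authoritative table.)
 */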
6008 
6009 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6010     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6011 
6012 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6013     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6014 
6015 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6016     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6017 
6018 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6019 
6020 enum DisasInsnEnum {
6021 #include "insn-data.h.inc"
6022 };
6023 
6024 #undef E
6025 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6026     .opc = OPC,                                                             \
6027     .flags = FL,                                                            \
6028     .fmt = FMT_##FT,                                                        \
6029     .fac = FAC_##FC,                                                        \
6030     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6031     .name = #NM,                                                            \
6032     .help_in1 = in1_##I1,                                                   \
6033     .help_in2 = in2_##I2,                                                   \
6034     .help_prep = prep_##P,                                                  \
6035     .help_wout = wout_##W,                                                  \
6036     .help_cout = cout_##CC,                                                 \
6037     .help_op = op_##OP,                                                     \
6038     .data = D                                                               \
6039  },
6040 
6041 /* Allow 0 to be used for NULL in the table below.  */
6042 #define in1_0  NULL
6043 #define in2_0  NULL
6044 #define prep_0  NULL
6045 #define wout_0  NULL
6046 #define cout_0  NULL
6047 #define op_0  NULL
6048 
6049 #define SPEC_in1_0 0
6050 #define SPEC_in2_0 0
6051 #define SPEC_prep_0 0
6052 #define SPEC_wout_0 0
6053 
6054 /* Give smaller names to the various facilities.  */
6055 #define FAC_Z           S390_FEAT_ZARCH
6056 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6057 #define FAC_DFP         S390_FEAT_DFP
6058 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6059 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6060 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6061 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6062 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6063 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6064 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6065 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6066 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6067 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6068 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6069 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6070 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6071 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6072 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6073 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6074 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6075 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6076 #define FAC_SFLE        S390_FEAT_STFLE
6077 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6078 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6079 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6080 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6081 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6082 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6083 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6084 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6085 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6086 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6087 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6088 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6089 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6090 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6091 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6092 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6093 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6094 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6095 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6096 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6097 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6098 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6099 
6100 static const DisasInsn insn_info[] = {
6101 #include "insn-data.h.inc"
6102 };
6103 
6104 #undef E
6105 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6106     case OPC: return &insn_info[insn_ ## NM];
6107 
6108 static const DisasInsn *lookup_opc(uint16_t opc)
6109 {
6110     switch (opc) {
6111 #include "insn-data.h.inc"
6112     default:
6113         return NULL;
6114     }
6115 }
6116 
6117 #undef F
6118 #undef E
6119 #undef D
6120 #undef C
6121 
6122 /* Extract a field from the insn.  The INSN should be left-aligned in
6123    the uint64_t so that we can more easily utilize the big-bit-endian
6124    definitions we extract from the Principles of Operation.  */
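/*
 * Worked example: for a 4-bit field beginning at bit 8 (e.g. R1 of an
 * RR-format insn), (insn << 8) >> (64 - 4) leaves exactly those four
 * bits in r.
 */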
6125 
6126 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6127 {
6128     uint32_t r, m;
6129 
6130     if (f->size == 0) {
6131         return;
6132     }
6133 
6134     /* Zero extract the field from the insn.  */
6135     r = (insn << f->beg) >> (64 - f->size);
6136 
6137     /* Sign-extend, or un-swap the field as necessary.  */
6138     switch (f->type) {
6139     case 0: /* unsigned */
6140         break;
6141     case 1: /* signed */
6142         assert(f->size <= 32);
6143         m = 1u << (f->size - 1);
6144         r = (r ^ m) - m;
6145         break;
6146     case 2: /* dl+dh split, signed 20 bit. */
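        /*
         * The 20-bit field is encoded as DL (12 bits) followed by
         * DH (8 bits), so r arrives as DL:DH.  Sign-extend DH and
         * reassemble the signed displacement as DH:DL.
         */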
6147         r = ((int8_t)r << 12) | (r >> 8);
6148         break;
6149     case 3: /* MSB stored in RXB */
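        /*
         * Vector insns keep the fifth (most significant) bit of each
         * register designation in the RXB byte, bits 36-39 of the
         * left-aligned insn; f->beg selects the RXB bit that belongs
         * to this field.
         */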
6150         g_assert(f->size == 4);
6151         switch (f->beg) {
6152         case 8:
6153             r |= extract64(insn, 63 - 36, 1) << 4;
6154             break;
6155         case 12:
6156             r |= extract64(insn, 63 - 37, 1) << 4;
6157             break;
6158         case 16:
6159             r |= extract64(insn, 63 - 38, 1) << 4;
6160             break;
6161         case 32:
6162             r |= extract64(insn, 63 - 39, 1) << 4;
6163             break;
6164         default:
6165             g_assert_not_reached();
6166         }
6167         break;
6168     default:
6169         abort();
6170     }
6171 
6172     /*
6173      * Validate that the "compressed" encoding we selected above is valid.
6174      * I.e. we haven't made two different original fields overlap.
6175      */
6176     assert(((o->presentC >> f->indexC) & 1) == 0);
6177     o->presentC |= 1 << f->indexC;
6178     o->presentO |= 1 << f->indexO;
6179 
6180     o->c[f->indexC] = r;
6181 }
6182 
6183 /* Look up the insn at the current PC, extracting the operands into O and
6184    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6185 
6186 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6187 {
6188     uint64_t insn, pc = s->base.pc_next;
6189     int op, op2, ilen;
6190     const DisasInsn *info;
6191 
6192     if (unlikely(s->ex_value)) {
6193         uint64_t be_insn;
6194 
6195         /* Drop the EX data now, so that it's clear on exception paths.  */
6196         tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
6197                        offsetof(CPUS390XState, ex_value));
6198 
6199         /* Extract the values saved by EXECUTE.  */
6200         insn = s->ex_value & 0xffffffffffff0000ull;
6201         ilen = s->ex_value & 0xf;
6202         op = insn >> 56;
6203 
6204         /* Register insn bytes with translator so plugins work. */
6205         be_insn = cpu_to_be64(insn);
6206         translator_fake_ld(&s->base, &be_insn, get_ilen(op));
6207     } else {
6208         insn = ld_code2(env, s, pc);
6209         op = (insn >> 8) & 0xff;
6210         ilen = get_ilen(op);
6211         switch (ilen) {
6212         case 2:
6213             insn = insn << 48;
6214             break;
6215         case 4:
6216             insn = ld_code4(env, s, pc) << 32;
6217             break;
6218         case 6:
6219             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6220             break;
6221         default:
6222             g_assert_not_reached();
6223         }
6224     }
6225     s->pc_tmp = s->base.pc_next + ilen;
6226     s->ilen = ilen;
6227 
6228     /* We can't determine the insn format until we've looked up the
6229        full insn opcode, which in turn requires locating the secondary
6230        opcode.  Assume by default that OP2 is at bit 40; for those
6231        smaller insns that don't actually have a secondary opcode,
6232        this will correctly result in OP2 = 0. */
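    /*
     * E.g. for the 0xb2 family op2 is the second insn byte, extracted
     * below as (insn << 8) >> 56 from the left-aligned doubleword; the
     * default case takes byte 5 of a 6-byte insn.
     */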
6233     switch (op) {
6234     case 0x01: /* E */
6235     case 0x80: /* S */
6236     case 0x82: /* S */
6237     case 0x93: /* S */
6238     case 0xb2: /* S, RRF, RRE, IE */
6239     case 0xb3: /* RRE, RRD, RRF */
6240     case 0xb9: /* RRE, RRF */
6241     case 0xe5: /* SSE, SIL */
6242         op2 = (insn << 8) >> 56;
6243         break;
6244     case 0xa5: /* RI */
6245     case 0xa7: /* RI */
6246     case 0xc0: /* RIL */
6247     case 0xc2: /* RIL */
6248     case 0xc4: /* RIL */
6249     case 0xc6: /* RIL */
6250     case 0xc8: /* SSF */
6251     case 0xcc: /* RIL */
6252         op2 = (insn << 12) >> 60;
6253         break;
6254     case 0xc5: /* MII */
6255     case 0xc7: /* SMI */
6256     case 0xd0 ... 0xdf: /* SS */
6257     case 0xe1: /* SS */
6258     case 0xe2: /* SS */
6259     case 0xe8: /* SS */
6260     case 0xe9: /* SS */
6261     case 0xea: /* SS */
6262     case 0xee ... 0xf3: /* SS */
6263     case 0xf8 ... 0xfd: /* SS */
6264         op2 = 0;
6265         break;
6266     default:
6267         op2 = (insn << 40) >> 56;
6268         break;
6269     }
6270 
6271     memset(&s->fields, 0, sizeof(s->fields));
6272     s->fields.raw_insn = insn;
6273     s->fields.op = op;
6274     s->fields.op2 = op2;
6275 
6276     /* Look up the instruction.  */
6277     info = lookup_opc(op << 8 | op2);
6278     s->insn = info;
6279 
6280     /* If we found it, extract the operands.  */
6281     if (info != NULL) {
6282         DisasFormat fmt = info->fmt;
6283         int i;
6284 
6285         for (i = 0; i < NUM_C_FIELD; ++i) {
6286             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6287         }
6288     }
6289     return info;
6290 }
6291 
6292 static bool is_afp_reg(int reg)
6293 {
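    /* Without the AFP facility only FPRs 0, 2, 4 and 6 exist;
       anything odd or above 6 is an additional (AFP) register. */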
6294     return reg % 2 || reg > 6;
6295 }
6296 
6297 static bool is_fp_pair(int reg)
6298 {
6299     /* Pair starts are 0,1,4,5,8,9,12,13: to exclude the others, check that bit 1 is clear */
6300     return !(reg & 0x2);
6301 }
6302 
6303 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6304 {
6305     const DisasInsn *insn;
6306     DisasJumpType ret = DISAS_NEXT;
6307     DisasOps o = {};
6308     bool icount = false;
6309 
6310     /* Search for the insn in the table.  */
6311     insn = extract_insn(env, s);
6312 
6313     /* Update insn_start now that we know the ILEN.  */
6314     tcg_set_insn_start_param(s->base.insn_start, 2, s->ilen);
6315 
6316     /* Not found means unimplemented/illegal opcode.  */
6317     if (insn == NULL) {
6318         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6319                       s->fields.op, s->fields.op2);
6320         gen_illegal_opcode(s);
6321         ret = DISAS_NORETURN;
6322         goto out;
6323     }
6324 
6325 #ifndef CONFIG_USER_ONLY
6326     if (s->base.tb->flags & FLAG_MASK_PER) {
6327         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6328         gen_helper_per_ifetch(tcg_env, addr);
6329     }
6330 #endif
6331 
6332     /* process flags */
6333     if (insn->flags) {
6334         /* privileged instruction */
6335         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6336             gen_program_exception(s, PGM_PRIVILEGED);
6337             ret = DISAS_NORETURN;
6338             goto out;
6339         }
6340 
6341         /* if AFP is not enabled, AFP instructions and registers are forbidden */
6342         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6343             uint8_t dxc = 0;
6344 
6345             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6346                 dxc = 1;
6347             }
6348             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6349                 dxc = 1;
6350             }
6351             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6352                 dxc = 1;
6353             }
6354             if (insn->flags & IF_BFP) {
6355                 dxc = 2;
6356             }
6357             if (insn->flags & IF_DFP) {
6358                 dxc = 3;
6359             }
6360             if (insn->flags & IF_VEC) {
6361                 dxc = 0xfe;
6362             }
6363             if (dxc) {
6364                 gen_data_exception(dxc);
6365                 ret = DISAS_NORETURN;
6366                 goto out;
6367             }
6368         }
6369 
6370         /* if vector instructions are not enabled, executing them is forbidden */
6371         if (insn->flags & IF_VEC) {
6372             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6373                 gen_data_exception(0xfe);
6374                 ret = DISAS_NORETURN;
6375                 goto out;
6376             }
6377         }
6378 
6379         /* input/output insns are a special case under icount */
6380         if (unlikely(insn->flags & IF_IO)) {
6381             icount = translator_io_start(&s->base);
6382         }
6383     }
6384 
6385     /* Check for insn specification exceptions.  */
6386     if (insn->spec) {
6387         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6388             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6389             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6390             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6391             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6392             gen_program_exception(s, PGM_SPECIFICATION);
6393             ret = DISAS_NORETURN;
6394             goto out;
6395         }
6396     }
6397 
6398     /* Implement the instruction.  */
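    /*
     * The helpers run in a fixed order: in1/in2 load the operands,
     * prep sets up the output temporaries, op performs the operation
     * proper, and, unless op already ended the TB, wout writes the
     * results back while cout computes the condition code.
     */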
6399     if (insn->help_in1) {
6400         insn->help_in1(s, &o);
6401     }
6402     if (insn->help_in2) {
6403         insn->help_in2(s, &o);
6404     }
6405     if (insn->help_prep) {
6406         insn->help_prep(s, &o);
6407     }
6408     if (insn->help_op) {
6409         ret = insn->help_op(s, &o);
6410     }
6411     if (ret != DISAS_NORETURN) {
6412         if (insn->help_wout) {
6413             insn->help_wout(s, &o);
6414         }
6415         if (insn->help_cout) {
6416             insn->help_cout(s, &o);
6417         }
6418     }
6419 
6420     /* An I/O insn should be the last in the TB when icount is enabled */
6421     if (unlikely(icount && ret == DISAS_NEXT)) {
6422         ret = DISAS_TOO_MANY;
6423     }
6424 
6425 #ifndef CONFIG_USER_ONLY
6426     if (s->base.tb->flags & FLAG_MASK_PER) {
6427         /* An exception might be triggered; save the PSW if not already done.  */
6428         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6429             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6430         }
6431 
6432         /* Call the helper to check for a possible PER exception.  */
6433         gen_helper_per_check_exception(tcg_env);
6434     }
6435 #endif
6436 
6437 out:
6438     /* Advance to the next instruction.  */
6439     s->base.pc_next = s->pc_tmp;
6440     return ret;
6441 }
6442 
6443 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6444 {
6445     DisasContext *dc = container_of(dcbase, DisasContext, base);
6446 
6447     /* 31-bit mode */
6448     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6449         dc->base.pc_first &= 0x7fffffff;
6450         dc->base.pc_next = dc->base.pc_first;
6451     }
6452 
6453     dc->cc_op = CC_OP_DYNAMIC;
6454     dc->ex_value = dc->base.tb->cs_base;
6455     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6456 }
6457 
6458 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6459 {
6460 }
6461 
6462 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6463 {
6464     DisasContext *dc = container_of(dcbase, DisasContext, base);
6465 
6466     /* Delay setting the ILEN until we've read the insn. */
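    /* The trailing 0 is overwritten via tcg_set_insn_start_param()
       in translate_one() once the insn length is known. */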
6467     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6468 }
6469 
6470 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6471                                 uint64_t pc)
6472 {
6473     uint64_t insn = translator_lduw(env, &s->base, pc);
6474 
6475     return pc + get_ilen((insn >> 8) & 0xff);
6476 }
6477 
6478 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6479 {
6480     CPUS390XState *env = cpu_env(cs);
6481     DisasContext *dc = container_of(dcbase, DisasContext, base);
6482 
6483     dc->base.is_jmp = translate_one(env, dc);
6484     if (dc->base.is_jmp == DISAS_NEXT) {
6485         if (dc->ex_value ||
6486             !is_same_page(dcbase, dc->base.pc_next) ||
6487             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6488             dc->base.is_jmp = DISAS_TOO_MANY;
6489         }
6490     }
6491 }
6492 
6493 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6494 {
6495     DisasContext *dc = container_of(dcbase, DisasContext, base);
6496 
6497     switch (dc->base.is_jmp) {
6498     case DISAS_NORETURN:
6499         break;
6500     case DISAS_TOO_MANY:
6501         update_psw_addr(dc);
6502         /* FALLTHRU */
6503     case DISAS_PC_UPDATED:
6504         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6505            cc op type is in env */
6506         update_cc_op(dc);
6507         /* FALLTHRU */
6508     case DISAS_PC_CC_UPDATED:
6509         /* Exit the TB, either by raising a debug exception or by return.  */
6510         if (dc->exit_to_mainloop) {
6511             tcg_gen_exit_tb(NULL, 0);
6512         } else {
6513             tcg_gen_lookup_and_goto_ptr();
6514         }
6515         break;
6516     default:
6517         g_assert_not_reached();
6518     }
6519 }
6520 
6521 static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
6522                                CPUState *cs, FILE *logfile)
6523 {
6524     DisasContext *dc = container_of(dcbase, DisasContext, base);
6525 
6526     if (unlikely(dc->ex_value)) {
6527         /* The ex_value has been recorded with translator_fake_ld. */
6528         fprintf(logfile, "IN: EXECUTE\n");
6529         target_disas(logfile, cs, &dc->base);
6530         return true;
6531     }
6532     return false;
6533 }
6534 
6535 static const TranslatorOps s390x_tr_ops = {
6536     .init_disas_context = s390x_tr_init_disas_context,
6537     .tb_start           = s390x_tr_tb_start,
6538     .insn_start         = s390x_tr_insn_start,
6539     .translate_insn     = s390x_tr_translate_insn,
6540     .tb_stop            = s390x_tr_tb_stop,
6541     .disas_log          = s390x_tr_disas_log,
6542 };
6543 
6544 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6545                            vaddr pc, void *host_pc)
6546 {
6547     DisasContext dc;
6548 
6549     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6550 }
6551 
6552 void s390x_restore_state_to_opc(CPUState *cs,
6553                                 const TranslationBlock *tb,
6554                                 const uint64_t *data)
6555 {
6556     CPUS390XState *env = cpu_env(cs);
6557     int cc_op = data[1];
6558 
6559     env->psw.addr = data[0];
6560 
6561     /* Update the CC opcode if it is not already up-to-date.  */
6562     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6563         env->cc_op = cc_op;
6564     }
6565 
6566     /* Record ILEN.  */
6567     env->int_pgm_ilen = data[2];
6568 }
6569