xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 93e0932b)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

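/*
 * Write the branch-address ("link") information for PC into OUT.  In
 * 64-bit mode this is the full address; in 31-bit mode the top bit of
 * the low word is set as the addressing-mode indicator, and in both
 * 24- and 31-bit modes only the low 32 bits of the register change.
 */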
static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}

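/* TCG globals mapping onto fields of CPUS390XState; they are created in
   s390x_translate_init() below. */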
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest 8
     * bytes of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}

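/* The 16 floating-point registers occupy the leftmost 8 bytes of the
   first 16 vector registers, so they are addressed as element 0 of the
   corresponding vector register. */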
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    tcg_temp_free_i64(h);
    tcg_temp_free_i64(l);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

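/*
 * PER (Program-Event Recording) support.  While FLAG_MASK_PER is set,
 * taken branches are reported via the per_branch helper so a branch
 * event can be recognized; gbea tracks the breaking-event address.
 */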
static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

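/* Pick the MMU index for a memory access: real-mode when DAT is off,
   otherwise according to the PSW address-space control. */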
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

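/* Add IMM to SRC, wrapping the result to the current addressing mode:
   addresses are truncated to 31 or 24 bits unless in 64-bit mode. */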
static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

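/* True if cc_src/cc_dst/cc_vr hold live inputs for the current cc_op.
   The CC_OP_CONST0..CONST3, CC_OP_STATIC and CC_OP_DYNAMIC ops do not
   use them, so in those cases the temps may simply be discarded. */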
static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

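/* goto_tb chaining cannot be used while PER is active, since each branch
   must be intercepted to recognize branch events. */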
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
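/* For example, "int r1 = get_field(s, r1);" reads the decoded R1 field,
   after have_field(s, r1) has confirmed that the field is present. */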

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

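/*
 * Branch to a destination known at translation time: fall through when
 * it is simply the next sequential instruction, chain via goto_tb when
 * possible, and otherwise update psw_addr and return to the main loop.
 */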
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

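/*
 * Emit a conditional branch described by C.  IS_IMM selects between a
 * relative target IMM (in halfwords from the current instruction) and
 * an indirect target in CDEST; goto_tb chaining is used for whichever
 * exits permit it.
 */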
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

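/* Unsigned 64-bit add; the carry-out is left in cc_src for CC_OP_ADDU. */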
static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}

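/* ADD IMMEDIATE to storage: done as a single atomic read-modify-write
   when S390_FEAT_STFLE_45 (which includes the interlocked-access
   facility) is available, otherwise as a plain load/add/store. */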
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

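/* AND IMMEDIATE to storage, performed atomically when the
   interlocked-access facility 2 is installed. */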
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

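/*
 * Build the 24-bit-mode link information: instruction-length code,
 * condition code and program mask in the high byte of the low word,
 * with the return address in the low 24 bits.  In 31- and 64-bit modes
 * the plain link address from pc_to_link_info() is used instead.
 */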
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

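/* BRANCH ON INDEX: add r3 to r1, then branch while the sum compares
   less-or-equal (BXLE) or greater (BXH) against the comparand in the
   odd register of the r3 pair, as selected by s->insn->data. */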
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    bool is_imm = have_field(s, i2);
    int imm = is_imm ? get_field(s, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}

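/* COMPARE AND BRANCH: s->insn->data selects the logical (unsigned)
   comparison variants. */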
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s, i4);
    if (is_imm) {
        imm = get_field(s, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s, b4),
                             get_field(s, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}

static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
    set_cc_static(s);
    return DISAS_NEXT;
}

1801 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1802                                    bool m4_with_fpe)
1803 {
1804     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1805     uint8_t m3 = get_field(s, m3);
1806     uint8_t m4 = get_field(s, m4);
1807 
1808     /* m3 field was introduced with FPE */
1809     if (!fpe && m3_with_fpe) {
1810         m3 = 0;
1811     }
1812     /* m4 field was introduced with FPE */
1813     if (!fpe && m4_with_fpe) {
1814         m4 = 0;
1815     }
1816 
1817     /* Check for valid rounding modes; mode 3 requires the FP extension. */
1818     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1819         gen_program_exception(s, PGM_SPECIFICATION);
1820         return NULL;
1821     }
1822 
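         /* Pack m3 into bits 0-3 and m4 into bits 4-7 of the helper
            argument; e.g. m3 = 1, m4 = 5 yields 0x51. */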
1823     return tcg_const_i32(deposit32(m3, 4, 4, m4));
1824 }
1825 
1826 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1827 {
1828     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1829 
1830     if (!m34) {
1831         return DISAS_NORETURN;
1832     }
1833     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1834     tcg_temp_free_i32(m34);
1835     set_cc_static(s);
1836     return DISAS_NEXT;
1837 }
1838 
1839 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1840 {
1841     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1842 
1843     if (!m34) {
1844         return DISAS_NORETURN;
1845     }
1846     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1847     tcg_temp_free_i32(m34);
1848     set_cc_static(s);
1849     return DISAS_NEXT;
1850 }
1851 
1852 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1853 {
1854     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1855 
1856     if (!m34) {
1857         return DISAS_NORETURN;
1858     }
1859     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1860     tcg_temp_free_i32(m34);
1861     set_cc_static(s);
1862     return DISAS_NEXT;
1863 }
1864 
1865 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1866 {
1867     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1868 
1869     if (!m34) {
1870         return DISAS_NORETURN;
1871     }
1872     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1873     tcg_temp_free_i32(m34);
1874     set_cc_static(s);
1875     return DISAS_NEXT;
1876 }
1877 
1878 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1879 {
1880     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1881 
1882     if (!m34) {
1883         return DISAS_NORETURN;
1884     }
1885     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1886     tcg_temp_free_i32(m34);
1887     set_cc_static(s);
1888     return DISAS_NEXT;
1889 }
1890 
1891 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1892 {
1893     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1894 
1895     if (!m34) {
1896         return DISAS_NORETURN;
1897     }
1898     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1899     tcg_temp_free_i32(m34);
1900     set_cc_static(s);
1901     return DISAS_NEXT;
1902 }
1903 
1904 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1905 {
1906     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1907 
1908     if (!m34) {
1909         return DISAS_NORETURN;
1910     }
1911     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1912     tcg_temp_free_i32(m34);
1913     set_cc_static(s);
1914     return DISAS_NEXT;
1915 }
1916 
1917 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1918 {
1919     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1920 
1921     if (!m34) {
1922         return DISAS_NORETURN;
1923     }
1924     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1925     tcg_temp_free_i32(m34);
1926     set_cc_static(s);
1927     return DISAS_NEXT;
1928 }
1929 
1930 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1931 {
1932     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1933 
1934     if (!m34) {
1935         return DISAS_NORETURN;
1936     }
1937     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1938     tcg_temp_free_i32(m34);
1939     set_cc_static(s);
1940     return DISAS_NEXT;
1941 }
1942 
1943 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1944 {
1945     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1946 
1947     if (!m34) {
1948         return DISAS_NORETURN;
1949     }
1950     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1951     tcg_temp_free_i32(m34);
1952     set_cc_static(s);
1953     return DISAS_NEXT;
1954 }
1955 
1956 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1957 {
1958     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1959 
1960     if (!m34) {
1961         return DISAS_NORETURN;
1962     }
1963     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1964     tcg_temp_free_i32(m34);
1965     set_cc_static(s);
1966     return DISAS_NEXT;
1967 }
1968 
1969 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1970 {
1971     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1972 
1973     if (!m34) {
1974         return DISAS_NORETURN;
1975     }
1976     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1977     tcg_temp_free_i32(m34);
1978     set_cc_static(s);
1979     return DISAS_NEXT;
1980 }
1981 
1982 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1983 {
1984     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1985 
1986     if (!m34) {
1987         return DISAS_NORETURN;
1988     }
1989     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1990     tcg_temp_free_i32(m34);
1991     return DISAS_NEXT;
1992 }
1993 
1994 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1995 {
1996     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1997 
1998     if (!m34) {
1999         return DISAS_NORETURN;
2000     }
2001     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
2002     tcg_temp_free_i32(m34);
2003     return DISAS_NEXT;
2004 }
2005 
2006 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
2007 {
2008     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2009 
2010     if (!m34) {
2011         return DISAS_NORETURN;
2012     }
2013     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
2014     tcg_temp_free_i32(m34);
2015     return DISAS_NEXT;
2016 }
2017 
2018 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2019 {
2020     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2021 
2022     if (!m34) {
2023         return DISAS_NORETURN;
2024     }
2025     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2026     tcg_temp_free_i32(m34);
2027     return DISAS_NEXT;
2028 }
2029 
2030 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2031 {
2032     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2033 
2034     if (!m34) {
2035         return DISAS_NORETURN;
2036     }
2037     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2038     tcg_temp_free_i32(m34);
2039     return DISAS_NEXT;
2040 }
2041 
2042 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2043 {
2044     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2045 
2046     if (!m34) {
2047         return DISAS_NORETURN;
2048     }
2049     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
2050     tcg_temp_free_i32(m34);
2051     return DISAS_NEXT;
2052 }
2053 
2054 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2055 {
2056     int r2 = get_field(s, r2);
2057     TCGv_i128 pair = tcg_temp_new_i128();
2058     TCGv_i64 len = tcg_temp_new_i64();
2059 
2060     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2061     set_cc_static(s);
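         /* The helper returns a 128-bit pair: the low half is the checksum
            for r1 (o->out), the high half is the number of bytes consumed,
            used below to advance the r2/r2 + 1 address/length pair. */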
2062     tcg_gen_extr_i128_i64(o->out, len, pair);
2063     tcg_temp_free_i128(pair);
2064 
2065     tcg_gen_add_i64(regs[r2], regs[r2], len);
2066     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2067     tcg_temp_free_i64(len);
2068 
2069     return DISAS_NEXT;
2070 }
2071 
2072 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2073 {
2074     int l = get_field(s, l1);
2075     TCGv_i32 vl;
2076 
2077     switch (l + 1) {
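         /* The l field encodes the operand length minus one.  For 1, 2, 4
            and 8 byte operands, inline the comparison as two loads plus a
            CC computation; all other lengths go through the helper. */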
2078     case 1:
2079         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2080         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2081         break;
2082     case 2:
2083         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2084         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2085         break;
2086     case 4:
2087         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2088         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2089         break;
2090     case 8:
2091         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2092         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2093         break;
2094     default:
2095         vl = tcg_const_i32(l);
2096         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2097         tcg_temp_free_i32(vl);
2098         set_cc_static(s);
2099         return DISAS_NEXT;
2100     }
2101     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2102     return DISAS_NEXT;
2103 }
2104 
2105 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2106 {
2107     int r1 = get_field(s, r1);
2108     int r2 = get_field(s, r2);
2109     TCGv_i32 t1, t2;
2110 
2111     /* r1 and r2 must be even.  */
2112     if (r1 & 1 || r2 & 1) {
2113         gen_program_exception(s, PGM_SPECIFICATION);
2114         return DISAS_NORETURN;
2115     }
2116 
2117     t1 = tcg_const_i32(r1);
2118     t2 = tcg_const_i32(r2);
2119     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2120     tcg_temp_free_i32(t1);
2121     tcg_temp_free_i32(t2);
2122     set_cc_static(s);
2123     return DISAS_NEXT;
2124 }
2125 
2126 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2127 {
2128     int r1 = get_field(s, r1);
2129     int r3 = get_field(s, r3);
2130     TCGv_i32 t1, t3;
2131 
2132     /* r1 and r3 must be even.  */
2133     if (r1 & 1 || r3 & 1) {
2134         gen_program_exception(s, PGM_SPECIFICATION);
2135         return DISAS_NORETURN;
2136     }
2137 
2138     t1 = tcg_const_i32(r1);
2139     t3 = tcg_const_i32(r3);
2140     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2141     tcg_temp_free_i32(t1);
2142     tcg_temp_free_i32(t3);
2143     set_cc_static(s);
2144     return DISAS_NEXT;
2145 }
2146 
2147 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2148 {
2149     int r1 = get_field(s, r1);
2150     int r3 = get_field(s, r3);
2151     TCGv_i32 t1, t3;
2152 
2153     /* r1 and r3 must be even.  */
2154     if (r1 & 1 || r3 & 1) {
2155         gen_program_exception(s, PGM_SPECIFICATION);
2156         return DISAS_NORETURN;
2157     }
2158 
2159     t1 = tcg_const_i32(r1);
2160     t3 = tcg_const_i32(r3);
2161     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2162     tcg_temp_free_i32(t1);
2163     tcg_temp_free_i32(t3);
2164     set_cc_static(s);
2165     return DISAS_NEXT;
2166 }
2167 
2168 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2169 {
2170     TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2171     TCGv_i32 t1 = tcg_temp_new_i32();
2172     tcg_gen_extrl_i64_i32(t1, o->in1);
2173     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2174     set_cc_static(s);
2175     tcg_temp_free_i32(t1);
2176     tcg_temp_free_i32(m3);
2177     return DISAS_NEXT;
2178 }
2179 
2180 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2181 {
2182     TCGv_i128 pair = tcg_temp_new_i128();
2183 
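         /* regs[0] holds the string-terminating character; the helper
            returns the two updated operand addresses packed into a 128-bit
            pair. */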
2184     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2185     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2186     tcg_temp_free_i128(pair);
2187 
2188     set_cc_static(s);
2189     return DISAS_NEXT;
2190 }
2191 
2192 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2193 {
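         /* COPY SIGN: combine the sign bit of in1 with the magnitude of
            in2. */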
2194     TCGv_i64 t = tcg_temp_new_i64();
2195     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2196     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2197     tcg_gen_or_i64(o->out, o->out, t);
2198     tcg_temp_free_i64(t);
2199     return DISAS_NEXT;
2200 }
2201 
2202 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2203 {
2204     int d2 = get_field(s, d2);
2205     int b2 = get_field(s, b2);
2206     TCGv_i64 addr, cc;
2207 
2208     /* Note that in1 = R3 (new value) and
2209        in2 = (zero-extended) R1 (expected value).  */
2210 
2211     addr = get_address(s, 0, b2, d2);
2212     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2213                                get_mem_index(s), s->insn->data | MO_ALIGN);
2214     tcg_temp_free_i64(addr);
2215 
2216     /* Are the memory and expected values (un)equal?  Note that this setcond
2217        produces the output CC value, thus the NE sense of the test.  */
2218     cc = tcg_temp_new_i64();
2219     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2220     tcg_gen_extrl_i64_i32(cc_op, cc);
2221     tcg_temp_free_i64(cc);
2222     set_cc_static(s);
2223 
2224     return DISAS_NEXT;
2225 }
2226 
2227 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2228 {
2229     int r1 = get_field(s, r1);
2230 
2231     o->out_128 = tcg_temp_new_i128();
2232     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2233 
2234     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2235     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2236                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2237 
2238     /*
2239      * Extract result into cc_dst:cc_src, compare vs the expected value
2240      * in the as yet unmodified input registers, then update CC_OP.
2241      */
2242     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2243     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2244     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2245     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2246     set_cc_nz_u64(s, cc_dst);
2247 
2248     return DISAS_NEXT;
2249 }
2250 
2251 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2252 {
2253     int r3 = get_field(s, r3);
2254     TCGv_i32 t_r3 = tcg_const_i32(r3);
2255 
2256     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2257         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2258     } else {
2259         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2260     }
2261     tcg_temp_free_i32(t_r3);
2262 
2263     set_cc_static(s);
2264     return DISAS_NEXT;
2265 }
2266 
2267 #ifndef CONFIG_USER_ONLY
2268 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2269 {
2270     MemOp mop = s->insn->data;
2271     TCGv_i64 addr, old, cc;
2272     TCGLabel *lab = gen_new_label();
2273 
2274     /* Note that in1 = R1 (zero-extended expected value),
2275        out = R1 (original reg), out2 = R1+1 (new value).  */
2276 
2277     addr = tcg_temp_new_i64();
2278     old = tcg_temp_new_i64();
2279     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2280     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2281                                get_mem_index(s), mop | MO_ALIGN);
2282     tcg_temp_free_i64(addr);
2283 
2284     /* Are the memory and expected values (un)equal?  */
2285     cc = tcg_temp_new_i64();
2286     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2287     tcg_gen_extrl_i64_i32(cc_op, cc);
2288 
2289     /* Write back the output now, before the following branch, so that
2290        we don't need local temps.  */
2291     if ((mop & MO_SIZE) == MO_32) {
2292         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2293     } else {
2294         tcg_gen_mov_i64(o->out, old);
2295     }
2296     tcg_temp_free_i64(old);
2297 
2298     /* If the comparison was equal, and the LSB of R2 was set,
2299        then we need to flush the TLB (for all cpus).  */
2300     tcg_gen_xori_i64(cc, cc, 1);
2301     tcg_gen_and_i64(cc, cc, o->in2);
2302     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2303     tcg_temp_free_i64(cc);
2304 
2305     gen_helper_purge(cpu_env);
2306     gen_set_label(lab);
2307 
2308     return DISAS_NEXT;
2309 }
2310 #endif
2311 
2312 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2313 {
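         /* CONVERT TO DECIMAL: the helper computes the 64-bit packed
            decimal form of the 32-bit first operand, which is then stored
            at the second-operand address. */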
2314     TCGv_i64 t1 = tcg_temp_new_i64();
2315     TCGv_i32 t2 = tcg_temp_new_i32();
2316     tcg_gen_extrl_i64_i32(t2, o->in1);
2317     gen_helper_cvd(t1, t2);
2318     tcg_temp_free_i32(t2);
2319     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2320     tcg_temp_free_i64(t1);
2321     return DISAS_NEXT;
2322 }
2323 
2324 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2325 {
2326     int m3 = get_field(s, m3);
2327     TCGLabel *lab = gen_new_label();
2328     TCGCond c;
2329 
2330     c = tcg_invert_cond(ltgt_cond[m3]);
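         /* Invert the test so that we branch around the trap: the trap is
            taken exactly when the m3 condition holds. */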
2331     if (s->insn->data) {
2332         c = tcg_unsigned_cond(c);
2333     }
2334     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2335 
2336     /* Trap.  */
2337     gen_trap(s);
2338 
2339     gen_set_label(lab);
2340     return DISAS_NEXT;
2341 }
2342 
2343 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2344 {
2345     int m3 = get_field(s, m3);
2346     int r1 = get_field(s, r1);
2347     int r2 = get_field(s, r2);
2348     TCGv_i32 tr1, tr2, chk;
2349 
2350     /* R1 and R2 must both be even.  */
2351     if ((r1 | r2) & 1) {
2352         gen_program_exception(s, PGM_SPECIFICATION);
2353         return DISAS_NORETURN;
2354     }
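         /* The M3 well-formedness-check flag is only defined with the
            ETF3-enhancement facility; ignore it otherwise. */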
2355     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2356         m3 = 0;
2357     }
2358 
2359     tr1 = tcg_const_i32(r1);
2360     tr2 = tcg_const_i32(r2);
2361     chk = tcg_const_i32(m3);
2362 
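         /* insn->data names the conversion by UTF widths: e.g. 12 is CU12
            (UTF-8 to UTF-16) and 42 is CU42 (UTF-32 to UTF-16). */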
2363     switch (s->insn->data) {
2364     case 12:
2365         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2366         break;
2367     case 14:
2368         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2369         break;
2370     case 21:
2371         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2372         break;
2373     case 24:
2374         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2375         break;
2376     case 41:
2377         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2378         break;
2379     case 42:
2380         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2381         break;
2382     default:
2383         g_assert_not_reached();
2384     }
2385 
2386     tcg_temp_free_i32(tr1);
2387     tcg_temp_free_i32(tr2);
2388     tcg_temp_free_i32(chk);
2389     set_cc_static(s);
2390     return DISAS_NEXT;
2391 }
2392 
2393 #ifndef CONFIG_USER_ONLY
2394 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2395 {
2396     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2397     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2398     TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2399 
2400     gen_helper_diag(cpu_env, r1, r3, func_code);
2401 
2402     tcg_temp_free_i32(func_code);
2403     tcg_temp_free_i32(r3);
2404     tcg_temp_free_i32(r1);
2405     return DISAS_NEXT;
2406 }
2407 #endif
2408 
2409 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2410 {
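         /* The helper packs the 32-bit quotient and remainder into one
            64-bit value; split it into the out/out2 register pair. */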
2411     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2412     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2413     return DISAS_NEXT;
2414 }
2415 
2416 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2417 {
2418     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2419     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2420     return DISAS_NEXT;
2421 }
2422 
2423 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2424 {
2425     TCGv_i128 t = tcg_temp_new_i128();
2426 
2427     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2428     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2429     tcg_temp_free_i128(t);
2430     return DISAS_NEXT;
2431 }
2432 
2433 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2434 {
2435     TCGv_i128 t = tcg_temp_new_i128();
2436 
2437     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2438     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2439     tcg_temp_free_i128(t);
2440     return DISAS_NEXT;
2441 }
2442 
2443 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2444 {
2445     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2446     return DISAS_NEXT;
2447 }
2448 
2449 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2450 {
2451     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2452     return DISAS_NEXT;
2453 }
2454 
2455 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2456 {
2457     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2458     return DISAS_NEXT;
2459 }
2460 
2461 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2462 {
2463     int r2 = get_field(s, r2);
2464     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2465     return DISAS_NEXT;
2466 }
2467 
2468 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2469 {
2470     /* No cache information provided.  */
2471     tcg_gen_movi_i64(o->out, -1);
2472     return DISAS_NEXT;
2473 }
2474 
2475 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2476 {
2477     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2478     return DISAS_NEXT;
2479 }
2480 
2481 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2482 {
2483     int r1 = get_field(s, r1);
2484     int r2 = get_field(s, r2);
2485     TCGv_i64 t = tcg_temp_new_i64();
2486 
2487     /* Note the "subsequently" in the PoO, which implies a defined result
2488        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2489     tcg_gen_shri_i64(t, psw_mask, 32);
2490     store_reg32_i64(r1, t);
2491     if (r2 != 0) {
2492         store_reg32_i64(r2, psw_mask);
2493     }
2494 
2495     tcg_temp_free_i64(t);
2496     return DISAS_NEXT;
2497 }
2498 
2499 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2500 {
2501     int r1 = get_field(s, r1);
2502     TCGv_i32 ilen;
2503     TCGv_i64 v1;
2504 
2505     /* Nested EXECUTE is not allowed.  */
2506     if (unlikely(s->ex_value)) {
2507         gen_program_exception(s, PGM_EXECUTE);
2508         return DISAS_NORETURN;
2509     }
2510 
2511     update_psw_addr(s);
2512     update_cc_op(s);
2513 
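         /* With r1 == 0 the target instruction executes unmodified;
            otherwise the helper ORs bits 56-63 of r1 into bits 8-15 of
            the target instruction. */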
2514     if (r1 == 0) {
2515         v1 = tcg_const_i64(0);
2516     } else {
2517         v1 = regs[r1];
2518     }
2519 
2520     ilen = tcg_const_i32(s->ilen);
2521     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2522     tcg_temp_free_i32(ilen);
2523 
2524     if (r1 == 0) {
2525         tcg_temp_free_i64(v1);
2526     }
2527 
2528     return DISAS_PC_CC_UPDATED;
2529 }
2530 
2531 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2532 {
2533     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2534 
2535     if (!m34) {
2536         return DISAS_NORETURN;
2537     }
2538     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2539     tcg_temp_free_i32(m34);
2540     return DISAS_NEXT;
2541 }
2542 
2543 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2544 {
2545     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2546 
2547     if (!m34) {
2548         return DISAS_NORETURN;
2549     }
2550     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2551     tcg_temp_free_i32(m34);
2552     return DISAS_NEXT;
2553 }
2554 
2555 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2556 {
2557     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2558 
2559     if (!m34) {
2560         return DISAS_NORETURN;
2561     }
2562     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2563     tcg_temp_free_i32(m34);
2564     return DISAS_NEXT;
2565 }
2566 
2567 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2568 {
2569     /* We'll use the original input for cc computation, since we get to
2570        compare that against 0, which ought to be better than comparing
2571        the real output against 64.  It also lets cc_dst be a convenient
2572        temporary during our computation.  */
2573     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2574 
2575     /* R1 = IN ? CLZ(IN) : 64.  */
2576     tcg_gen_clzi_i64(o->out, o->in2, 64);
2577 
2578     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2579        value by 64, which is undefined.  But since the shift is 64 iff the
2580        input is zero, we still get the correct result after and'ing.  */
2581     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2582     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2583     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2584     return DISAS_NEXT;
2585 }
2586 
2587 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2588 {
2589     int m3 = get_field(s, m3);
2590     int pos, len, base = s->insn->data;
2591     TCGv_i64 tmp = tcg_temp_new_i64();
2592     uint64_t ccm;
2593 
2594     switch (m3) {
2595     case 0xf:
2596         /* Effectively a 32-bit load.  */
2597         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2598         len = 32;
2599         goto one_insert;
2600 
2601     case 0xc:
2602     case 0x6:
2603     case 0x3:
2604         /* Effectively a 16-bit load.  */
2605         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2606         len = 16;
2607         goto one_insert;
2608 
2609     case 0x8:
2610     case 0x4:
2611     case 0x2:
2612     case 0x1:
2613         /* Effectively an 8-bit load.  */
2614         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2615         len = 8;
2616         goto one_insert;
2617 
2618     one_insert:
2619         pos = base + ctz32(m3) * 8;
2620         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2621         ccm = ((1ull << len) - 1) << pos;
2622         break;
2623 
2624     default:
2625         /* This is going to be a sequence of loads and inserts.  */
2626         pos = base + 32 - 8;
2627         ccm = 0;
2628         while (m3) {
2629             if (m3 & 0x8) {
2630                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2631                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2632                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2633                 ccm |= 0xffull << pos;
2634             }
2635             m3 = (m3 << 1) & 0xf;
2636             pos -= 8;
2637         }
2638         break;
2639     }
2640 
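         /* CC is based only on the inserted bytes: ccm masks them out of
            the result for the CC computation. */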
2641     tcg_gen_movi_i64(tmp, ccm);
2642     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2643     tcg_temp_free_i64(tmp);
2644     return DISAS_NEXT;
2645 }
2646 
2647 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2648 {
2649     int shift = s->insn->data & 0xff;
2650     int size = s->insn->data >> 8;
2651     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2652     return DISAS_NEXT;
2653 }
2654 
2655 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2656 {
2657     TCGv_i64 t1, t2;
2658 
2659     gen_op_calc_cc(s);
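         /* Insert the condition code into bits 34-35 and the program mask
            (PSW bits 20-23) into bits 36-39 of r1, zeroing bits 32-33 and
            leaving the rest of the register untouched. */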
2660     t1 = tcg_temp_new_i64();
2661     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2662     t2 = tcg_temp_new_i64();
2663     tcg_gen_extu_i32_i64(t2, cc_op);
2664     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2665     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2666     tcg_temp_free_i64(t1);
2667     tcg_temp_free_i64(t2);
2668     return DISAS_NEXT;
2669 }
2670 
2671 #ifndef CONFIG_USER_ONLY
2672 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2673 {
2674     TCGv_i32 m4;
2675 
2676     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2677         m4 = tcg_const_i32(get_field(s, m4));
2678     } else {
2679         m4 = tcg_const_i32(0);
2680     }
2681     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2682     tcg_temp_free_i32(m4);
2683     return DISAS_NEXT;
2684 }
2685 
2686 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2687 {
2688     TCGv_i32 m4;
2689 
2690     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2691         m4 = tcg_const_i32(get_field(s, m4));
2692     } else {
2693         m4 = tcg_const_i32(0);
2694     }
2695     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2696     tcg_temp_free_i32(m4);
2697     return DISAS_NEXT;
2698 }
2699 
2700 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2701 {
2702     gen_helper_iske(o->out, cpu_env, o->in2);
2703     return DISAS_NEXT;
2704 }
2705 #endif
2706 
2707 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2708 {
2709     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2710     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2711     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2712     TCGv_i32 t_r1, t_r2, t_r3, type;
2713 
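         /* Validate the register constraints for the given function type;
            each case falls through to the weaker checks below it. */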
2714     switch (s->insn->data) {
2715     case S390_FEAT_TYPE_KMA:
2716         if (r3 == r1 || r3 == r2) {
2717             gen_program_exception(s, PGM_SPECIFICATION);
2718             return DISAS_NORETURN;
2719         }
2720         /* FALL THROUGH */
2721     case S390_FEAT_TYPE_KMCTR:
2722         if (r3 & 1 || !r3) {
2723             gen_program_exception(s, PGM_SPECIFICATION);
2724             return DISAS_NORETURN;
2725         }
2726         /* FALL THROUGH */
2727     case S390_FEAT_TYPE_PPNO:
2728     case S390_FEAT_TYPE_KMF:
2729     case S390_FEAT_TYPE_KMC:
2730     case S390_FEAT_TYPE_KMO:
2731     case S390_FEAT_TYPE_KM:
2732         if (r1 & 1 || !r1) {
2733             gen_program_exception(s, PGM_SPECIFICATION);
2734             return DISAS_NORETURN;
2735         }
2736         /* FALL THROUGH */
2737     case S390_FEAT_TYPE_KMAC:
2738     case S390_FEAT_TYPE_KIMD:
2739     case S390_FEAT_TYPE_KLMD:
2740         if (r2 & 1 || !r2) {
2741             gen_program_exception(s, PGM_SPECIFICATION);
2742             return DISAS_NORETURN;
2743         }
2744         /* FALL THROUGH */
2745     case S390_FEAT_TYPE_PCKMO:
2746     case S390_FEAT_TYPE_PCC:
2747         break;
2748     default:
2749         g_assert_not_reached();
2750     }
2751 
2752     t_r1 = tcg_const_i32(r1);
2753     t_r2 = tcg_const_i32(r2);
2754     t_r3 = tcg_const_i32(r3);
2755     type = tcg_const_i32(s->insn->data);
2756     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2757     set_cc_static(s);
2758     tcg_temp_free_i32(t_r1);
2759     tcg_temp_free_i32(t_r2);
2760     tcg_temp_free_i32(t_r3);
2761     tcg_temp_free_i32(type);
2762     return DISAS_NEXT;
2763 }
2764 
2765 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2766 {
2767     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2768     set_cc_static(s);
2769     return DISAS_NEXT;
2770 }
2771 
2772 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2773 {
2774     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2775     set_cc_static(s);
2776     return DISAS_NEXT;
2777 }
2778 
2779 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2780 {
2781     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2782     set_cc_static(s);
2783     return DISAS_NEXT;
2784 }
2785 
2786 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2787 {
2788     /* The real output is the original value in memory.  */
2790     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2791                                  s->insn->data | MO_ALIGN);
2792     /* However, we need to recompute the addition for setting CC.  */
2793     tcg_gen_add_i64(o->out, o->in1, o->in2);
2794     return DISAS_NEXT;
2795 }
2796 
2797 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2798 {
2799     /* The real output is the original value in memory.  */
2801     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2802                                  s->insn->data | MO_ALIGN);
2803     /* However, we need to recompute the operation for setting CC.  */
2804     tcg_gen_and_i64(o->out, o->in1, o->in2);
2805     return DISAS_NEXT;
2806 }
2807 
2808 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2809 {
2810     /* The real output is the original value in memory.  */
2812     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2813                                 s->insn->data | MO_ALIGN);
2814     /* However, we need to recompute the operation for setting CC.  */
2815     tcg_gen_or_i64(o->out, o->in1, o->in2);
2816     return DISAS_NEXT;
2817 }
2818 
2819 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2820 {
2821     /* The real output is the original value in memory.  */
2823     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2824                                  s->insn->data | MO_ALIGN);
2825     /* However, we need to recompute the operation for setting CC.  */
2826     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2827     return DISAS_NEXT;
2828 }
2829 
2830 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2831 {
2832     gen_helper_ldeb(o->out, cpu_env, o->in2);
2833     return DISAS_NEXT;
2834 }
2835 
2836 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2837 {
2838     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2839 
2840     if (!m34) {
2841         return DISAS_NORETURN;
2842     }
2843     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2844     tcg_temp_free_i32(m34);
2845     return DISAS_NEXT;
2846 }
2847 
2848 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2849 {
2850     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2851 
2852     if (!m34) {
2853         return DISAS_NORETURN;
2854     }
2855     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2856     tcg_temp_free_i32(m34);
2857     return DISAS_NEXT;
2858 }
2859 
2860 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2861 {
2862     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2863 
2864     if (!m34) {
2865         return DISAS_NORETURN;
2866     }
2867     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2868     tcg_temp_free_i32(m34);
2869     return DISAS_NEXT;
2870 }
2871 
2872 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2873 {
2874     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2875     return DISAS_NEXT;
2876 }
2877 
2878 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2879 {
2880     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2881     return DISAS_NEXT;
2882 }
2883 
2884 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2885 {
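         /* Short (32-bit) FP values live in the high half of the 64-bit
            register. */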
2886     tcg_gen_shli_i64(o->out, o->in2, 32);
2887     return DISAS_NEXT;
2888 }
2889 
2890 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2891 {
2892     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2893     return DISAS_NEXT;
2894 }
2895 
2896 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2897 {
2898     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2899     return DISAS_NEXT;
2900 }
2901 
2902 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2903 {
2904     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2905     return DISAS_NEXT;
2906 }
2907 
2908 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2909 {
2910     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2911     return DISAS_NEXT;
2912 }
2913 
2914 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2915 {
2916     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2917     return DISAS_NEXT;
2918 }
2919 
2920 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2921 {
2922     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2923     return DISAS_NEXT;
2924 }
2925 
2926 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2927 {
2928     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2929     return DISAS_NEXT;
2930 }
2931 
2932 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2933 {
2934     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2935     return DISAS_NEXT;
2936 }
2937 
2938 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2939 {
2940     TCGLabel *lab = gen_new_label();
2941     store_reg32_i64(get_field(s, r1), o->in2);
2942     /* The value is stored even in case of trap. */
2943     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2944     gen_trap(s);
2945     gen_set_label(lab);
2946     return DISAS_NEXT;
2947 }
2948 
2949 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2950 {
2951     TCGLabel *lab = gen_new_label();
2952     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2953     /* The value is stored even in case of trap. */
2954     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2955     gen_trap(s);
2956     gen_set_label(lab);
2957     return DISAS_NEXT;
2958 }
2959 
2960 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2961 {
2962     TCGLabel *lab = gen_new_label();
2963     store_reg32h_i64(get_field(s, r1), o->in2);
2964     /* The value is stored even in case of trap. */
2965     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2966     gen_trap(s);
2967     gen_set_label(lab);
2968     return DISAS_NEXT;
2969 }
2970 
2971 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2972 {
2973     TCGLabel *lab = gen_new_label();
2974     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2975     /* The value is stored even in case of trap. */
2976     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2977     gen_trap(s);
2978     gen_set_label(lab);
2979     return DISAS_NEXT;
2980 }
2981 
2982 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2983 {
2984     TCGLabel *lab = gen_new_label();
2985     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2986     /* The value is stored even in case of trap. */
2987     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2988     gen_trap(s);
2989     gen_set_label(lab);
2990     return DISAS_NEXT;
2991 }
2992 
2993 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2994 {
2995     DisasCompare c;
2996 
2997     if (have_field(s, m3)) {
2998         /* LOAD * ON CONDITION */
2999         disas_jcc(s, &c, get_field(s, m3));
3000     } else {
3001         /* SELECT */
3002         disas_jcc(s, &c, get_field(s, m4));
3003     }
3004 
3005     if (c.is_64) {
3006         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
3007                             o->in2, o->in1);
3008         free_compare(&c);
3009     } else {
3010         TCGv_i32 t32 = tcg_temp_new_i32();
3011         TCGv_i64 t, z;
3012 
3013         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
3014         free_compare(&c);
3015 
3016         t = tcg_temp_new_i64();
3017         tcg_gen_extu_i32_i64(t, t32);
3018         tcg_temp_free_i32(t32);
3019 
3020         z = tcg_const_i64(0);
3021         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3022         tcg_temp_free_i64(t);
3023         tcg_temp_free_i64(z);
3024     }
3025 
3026     return DISAS_NEXT;
3027 }
3028 
3029 #ifndef CONFIG_USER_ONLY
3030 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3031 {
3032     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3033     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3034     gen_helper_lctl(cpu_env, r1, o->in2, r3);
3035     tcg_temp_free_i32(r1);
3036     tcg_temp_free_i32(r3);
3037     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3038     s->exit_to_mainloop = true;
3039     return DISAS_TOO_MANY;
3040 }
3041 
3042 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3043 {
3044     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3045     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3046     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3047     tcg_temp_free_i32(r1);
3048     tcg_temp_free_i32(r3);
3049     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3050     s->exit_to_mainloop = true;
3051     return DISAS_TOO_MANY;
3052 }
3053 
3054 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3055 {
3056     gen_helper_lra(o->out, cpu_env, o->in2);
3057     set_cc_static(s);
3058     return DISAS_NEXT;
3059 }
3060 
3061 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3062 {
3063     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3064     return DISAS_NEXT;
3065 }
3066 
3067 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3068 {
3069     TCGv_i64 t1, t2;
3070 
3071     per_breaking_event(s);
3072 
3073     t1 = tcg_temp_new_i64();
3074     t2 = tcg_temp_new_i64();
3075     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3076                         MO_TEUL | MO_ALIGN_8);
3077     tcg_gen_addi_i64(o->in2, o->in2, 4);
3078     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3079     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3080     tcg_gen_shli_i64(t1, t1, 32);
3081     gen_helper_load_psw(cpu_env, t1, t2);
3082     tcg_temp_free_i64(t1);
3083     tcg_temp_free_i64(t2);
3084     return DISAS_NORETURN;
3085 }
3086 
3087 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3088 {
3089     TCGv_i64 t1, t2;
3090 
3091     per_breaking_event(s);
3092 
3093     t1 = tcg_temp_new_i64();
3094     t2 = tcg_temp_new_i64();
3095     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3096                         MO_TEUQ | MO_ALIGN_8);
3097     tcg_gen_addi_i64(o->in2, o->in2, 8);
3098     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3099     gen_helper_load_psw(cpu_env, t1, t2);
3100     tcg_temp_free_i64(t1);
3101     tcg_temp_free_i64(t2);
3102     return DISAS_NORETURN;
3103 }
3104 #endif
3105 
3106 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3107 {
3108     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3109     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3110     gen_helper_lam(cpu_env, r1, o->in2, r3);
3111     tcg_temp_free_i32(r1);
3112     tcg_temp_free_i32(r3);
3113     return DISAS_NEXT;
3114 }
3115 
3116 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3117 {
3118     int r1 = get_field(s, r1);
3119     int r3 = get_field(s, r3);
3120     TCGv_i64 t1, t2;
3121 
3122     /* Only one register to read. */
3123     t1 = tcg_temp_new_i64();
3124     if (unlikely(r1 == r3)) {
3125         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3126         store_reg32_i64(r1, t1);
3127         tcg_temp_free(t1);
3128         return DISAS_NEXT;
3129     }
3130 
3131     /* First load the values of the first and last registers to trigger
3132        possible page faults. */
3133     t2 = tcg_temp_new_i64();
3134     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3135     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3136     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3137     store_reg32_i64(r1, t1);
3138     store_reg32_i64(r3, t2);
3139 
3140     /* Only two registers to read. */
3141     if (((r1 + 1) & 15) == r3) {
3142         tcg_temp_free(t2);
3143         tcg_temp_free(t1);
3144         return DISAS_NEXT;
3145     }
3146 
3147     /* Then load the remaining registers. Page fault can't occur. */
3148     r3 = (r3 - 1) & 15;
3149     tcg_gen_movi_i64(t2, 4);
3150     while (r1 != r3) {
3151         r1 = (r1 + 1) & 15;
3152         tcg_gen_add_i64(o->in2, o->in2, t2);
3153         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3154         store_reg32_i64(r1, t1);
3155     }
3156     tcg_temp_free(t2);
3157     tcg_temp_free(t1);
3158 
3159     return DISAS_NEXT;
3160 }
3161 
3162 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3163 {
3164     int r1 = get_field(s, r1);
3165     int r3 = get_field(s, r3);
3166     TCGv_i64 t1, t2;
3167 
3168     /* Only one register to read. */
3169     t1 = tcg_temp_new_i64();
3170     if (unlikely(r1 == r3)) {
3171         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3172         store_reg32h_i64(r1, t1);
3173         tcg_temp_free(t1);
3174         return DISAS_NEXT;
3175     }
3176 
3177     /* First load the values of the first and last registers to trigger
3178        possible page faults. */
3179     t2 = tcg_temp_new_i64();
3180     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3181     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3182     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3183     store_reg32h_i64(r1, t1);
3184     store_reg32h_i64(r3, t2);
3185 
3186     /* Only two registers to read. */
3187     if (((r1 + 1) & 15) == r3) {
3188         tcg_temp_free(t2);
3189         tcg_temp_free(t1);
3190         return DISAS_NEXT;
3191     }
3192 
3193     /* Then load the remaining registers. Page fault can't occur. */
3194     r3 = (r3 - 1) & 15;
3195     tcg_gen_movi_i64(t2, 4);
3196     while (r1 != r3) {
3197         r1 = (r1 + 1) & 15;
3198         tcg_gen_add_i64(o->in2, o->in2, t2);
3199         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3200         store_reg32h_i64(r1, t1);
3201     }
3202     tcg_temp_free(t2);
3203     tcg_temp_free(t1);
3204 
3205     return DISAS_NEXT;
3206 }
3207 
3208 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3209 {
3210     int r1 = get_field(s, r1);
3211     int r3 = get_field(s, r3);
3212     TCGv_i64 t1, t2;
3213 
3214     /* Only one register to read. */
3215     if (unlikely(r1 == r3)) {
3216         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3217         return DISAS_NEXT;
3218     }
3219 
3220     /* First load the values of the first and last registers to trigger
3221        possible page faults. */
3222     t1 = tcg_temp_new_i64();
3223     t2 = tcg_temp_new_i64();
3224     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3225     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3226     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3227     tcg_gen_mov_i64(regs[r1], t1);
3228     tcg_temp_free(t2);
3229 
3230     /* Only two registers to read. */
3231     if (((r1 + 1) & 15) == r3) {
3232         tcg_temp_free(t1);
3233         return DISAS_NEXT;
3234     }
3235 
3236     /* Then load the remaining registers. Page fault can't occur. */
3237     r3 = (r3 - 1) & 15;
3238     tcg_gen_movi_i64(t1, 8);
3239     while (r1 != r3) {
3240         r1 = (r1 + 1) & 15;
3241         tcg_gen_add_i64(o->in2, o->in2, t1);
3242         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3243     }
3244     tcg_temp_free(t1);
3245 
3246     return DISAS_NEXT;
3247 }
3248 
3249 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3250 {
3251     TCGv_i64 a1, a2;
3252     MemOp mop = s->insn->data;
3253 
3254     /* In a parallel context, stop the world and single step.  */
3255     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3256         update_psw_addr(s);
3257         update_cc_op(s);
3258         gen_exception(EXCP_ATOMIC);
3259         return DISAS_NORETURN;
3260     }
3261 
3262     /* In a serial context, perform the two loads ... */
3263     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3264     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3265     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3266     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3267     tcg_temp_free_i64(a1);
3268     tcg_temp_free_i64(a2);
3269 
3270     /* ... and indicate that we performed them while interlocked.  */
3271     gen_op_movi_cc(s, 0);
3272     return DISAS_NEXT;
3273 }
3274 
3275 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3276 {
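         /* 128-bit atomic load: a parallel context needs a true 128-bit
            atomic operation; without one, exit to the main loop and run
            the instruction with all other CPUs stopped. */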
3277     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3278         gen_helper_lpq(o->out, cpu_env, o->in2);
3279     } else if (HAVE_ATOMIC128) {
3280         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3281     } else {
3282         gen_helper_exit_atomic(cpu_env);
3283         return DISAS_NORETURN;
3284     }
3285     return_low128(o->out2);
3286     return DISAS_NEXT;
3287 }
3288 
3289 #ifndef CONFIG_USER_ONLY
3290 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3291 {
3292     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3293     return DISAS_NEXT;
3294 }
3295 #endif
3296 
3297 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3298 {
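         /* LOAD AND ZERO RIGHTMOST BYTE: clear bits 56-63. */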
3299     tcg_gen_andi_i64(o->out, o->in2, -256);
3300     return DISAS_NEXT;
3301 }
3302 
3303 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3304 {
3305     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3306 
3307     if (get_field(s, m3) > 6) {
3308         gen_program_exception(s, PGM_SPECIFICATION);
3309         return DISAS_NORETURN;
3310     }
3311 
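         /* -(addr | -block_size) is the distance from addr to the next
            block boundary, e.g. 0x3b for addr 0x1005 with 64-byte blocks;
            the result is that distance capped at 16. */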
3312     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3313     tcg_gen_neg_i64(o->addr1, o->addr1);
3314     tcg_gen_movi_i64(o->out, 16);
3315     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3316     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3317     return DISAS_NEXT;
3318 }
3319 
3320 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3321 {
3322 #if !defined(CONFIG_USER_ONLY)
3323     TCGv_i32 i2;
3324 #endif
3325     const uint16_t monitor_class = get_field(s, i2);
3326 
3327     if (monitor_class & 0xff00) {
3328         gen_program_exception(s, PGM_SPECIFICATION);
3329         return DISAS_NORETURN;
3330     }
3331 
3332 #if !defined(CONFIG_USER_ONLY)
3333     i2 = tcg_const_i32(monitor_class);
3334     gen_helper_monitor_call(cpu_env, o->addr1, i2);
3335     tcg_temp_free_i32(i2);
3336 #endif
3337     /* In user-only mode, the monitor call is a NOP.  */
3338     return DISAS_NEXT;
3339 }
3340 
3341 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3342 {
3343     o->out = o->in2;
3344     o->g_out = o->g_in2;
3345     o->in2 = NULL;
3346     o->g_in2 = false;
3347     return DISAS_NEXT;
3348 }
3349 
3350 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3351 {
3352     int b2 = get_field(s, b2);
3353     TCGv ar1 = tcg_temp_new_i64();
3354 
3355     o->out = o->in2;
3356     o->g_out = o->g_in2;
3357     o->in2 = NULL;
3358     o->g_in2 = false;
3359 
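         /* Select the new access register 1 value from the current
            address-space control: 0 for primary, 1 for AR mode, 2 for
            home, and for secondary the contents of access register b2
            (or 0 when b2 is 0). */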
3360     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3361     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3362         tcg_gen_movi_i64(ar1, 0);
3363         break;
3364     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3365         tcg_gen_movi_i64(ar1, 1);
3366         break;
3367     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3368         if (b2) {
3369             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3370         } else {
3371             tcg_gen_movi_i64(ar1, 0);
3372         }
3373         break;
3374     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3375         tcg_gen_movi_i64(ar1, 2);
3376         break;
3377     }
3378 
3379     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3380     tcg_temp_free_i64(ar1);
3381 
3382     return DISAS_NEXT;
3383 }
3384 
3385 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3386 {
3387     o->out = o->in1;
3388     o->out2 = o->in2;
3389     o->g_out = o->g_in1;
3390     o->g_out2 = o->g_in2;
3391     o->in1 = NULL;
3392     o->in2 = NULL;
3393     o->g_in1 = o->g_in2 = false;
3394     return DISAS_NEXT;
3395 }
3396 
3397 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3398 {
3399     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3400     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3401     tcg_temp_free_i32(l);
3402     return DISAS_NEXT;
3403 }
3404 
3405 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3406 {
3407     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3408     return DISAS_NEXT;
3409 }
3410 
3411 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3412 {
3413     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3414     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3415     tcg_temp_free_i32(l);
3416     return DISAS_NEXT;
3417 }
3418 
3419 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3420 {
3421     int r1 = get_field(s, r1);
3422     int r2 = get_field(s, r2);
3423     TCGv_i32 t1, t2;
3424 
3425     /* r1 and r2 must be even.  */
3426     if (r1 & 1 || r2 & 1) {
3427         gen_program_exception(s, PGM_SPECIFICATION);
3428         return DISAS_NORETURN;
3429     }
3430 
3431     t1 = tcg_const_i32(r1);
3432     t2 = tcg_const_i32(r2);
3433     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3434     tcg_temp_free_i32(t1);
3435     tcg_temp_free_i32(t2);
3436     set_cc_static(s);
3437     return DISAS_NEXT;
3438 }
3439 
3440 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3441 {
3442     int r1 = get_field(s, r1);
3443     int r3 = get_field(s, r3);
3444     TCGv_i32 t1, t3;
3445 
3446     /* r1 and r3 must be even.  */
3447     if (r1 & 1 || r3 & 1) {
3448         gen_program_exception(s, PGM_SPECIFICATION);
3449         return DISAS_NORETURN;
3450     }
3451 
3452     t1 = tcg_const_i32(r1);
3453     t3 = tcg_const_i32(r3);
3454     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3455     tcg_temp_free_i32(t1);
3456     tcg_temp_free_i32(t3);
3457     set_cc_static(s);
3458     return DISAS_NEXT;
3459 }
3460 
3461 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3462 {
3463     int r1 = get_field(s, r1);
3464     int r3 = get_field(s, r3);
3465     TCGv_i32 t1, t3;
3466 
3467     /* r1 and r3 must be even.  */
3468     if (r1 & 1 || r3 & 1) {
3469         gen_program_exception(s, PGM_SPECIFICATION);
3470         return DISAS_NORETURN;
3471     }
3472 
3473     t1 = tcg_const_i32(r1);
3474     t3 = tcg_const_i32(r3);
3475     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3476     tcg_temp_free_i32(t1);
3477     tcg_temp_free_i32(t3);
3478     set_cc_static(s);
3479     return DISAS_NEXT;
3480 }
3481 
3482 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3483 {
3484     int r3 = get_field(s, r3);
3485     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3486     set_cc_static(s);
3487     return DISAS_NEXT;
3488 }
3489 
3490 #ifndef CONFIG_USER_ONLY
3491 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3492 {
3493     int r1 = get_field(s, l1);
3494     int r3 = get_field(s, r3);
3495     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3496     set_cc_static(s);
3497     return DISAS_NEXT;
3498 }
3499 
3500 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3501 {
3502     int r1 = get_field(s, l1);
3503     int r3 = get_field(s, r3);
3504     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3505     set_cc_static(s);
3506     return DISAS_NEXT;
3507 }
3508 #endif
3509 
3510 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3511 {
3512     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3513     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3514     tcg_temp_free_i32(l);
3515     return DISAS_NEXT;
3516 }
3517 
3518 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3519 {
3520     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3521     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3522     tcg_temp_free_i32(l);
3523     return DISAS_NEXT;
3524 }
3525 
3526 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3527 {
3528     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3529     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3530 
3531     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3532     tcg_temp_free_i32(t1);
3533     tcg_temp_free_i32(t2);
3534     set_cc_static(s);
3535     return DISAS_NEXT;
3536 }
3537 
3538 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3539 {
3540     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3541     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3542 
3543     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3544     tcg_temp_free_i32(t1);
3545     tcg_temp_free_i32(t2);
3546     set_cc_static(s);
3547     return DISAS_NEXT;
3548 }
3549 
3550 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3551 {
3552     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3553     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3554     tcg_temp_free_i32(l);
3555     return DISAS_NEXT;
3556 }
3557 
3558 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3559 {
3560     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3561     return DISAS_NEXT;
3562 }
3563 
3564 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3565 {
3566     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3567     return DISAS_NEXT;
3568 }
3569 
3570 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3571 {
3572     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3573     return DISAS_NEXT;
3574 }
3575 
3576 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3577 {
3578     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3579     return DISAS_NEXT;
3580 }
3581 
3582 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3583 {
3584     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3585     return DISAS_NEXT;
3586 }
3587 
3588 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3589 {
3590     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3591     return DISAS_NEXT;
3592 }
3593 
3594 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3595 {
3596     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3597     return DISAS_NEXT;
3598 }
3599 
3600 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3601 {
3602     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3603     return DISAS_NEXT;
3604 }
3605 
3606 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3607 {
3608     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3609     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3610     tcg_temp_free_i64(r3);
3611     return DISAS_NEXT;
3612 }
3613 
3614 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3615 {
3616     TCGv_i64 r3 = load_freg(get_field(s, r3));
3617     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3618     tcg_temp_free_i64(r3);
3619     return DISAS_NEXT;
3620 }
3621 
3622 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3623 {
3624     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3625     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3626     tcg_temp_free_i64(r3);
3627     return DISAS_NEXT;
3628 }
3629 
3630 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3631 {
3632     TCGv_i64 r3 = load_freg(get_field(s, r3));
3633     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3634     tcg_temp_free_i64(r3);
3635     return DISAS_NEXT;
3636 }
3637 
3638 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3639 {
3640     TCGv_i64 z, n;
3641     z = tcg_const_i64(0);
3642     n = tcg_temp_new_i64();
3643     tcg_gen_neg_i64(n, o->in2);
3644     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3645     tcg_temp_free_i64(n);
3646     tcg_temp_free_i64(z);
3647     return DISAS_NEXT;
3648 }
3649 
3650 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3651 {
3652     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3653     return DISAS_NEXT;
3654 }
3655 
3656 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3657 {
3658     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3659     return DISAS_NEXT;
3660 }
3661 
3662 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3663 {
3664     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3665     tcg_gen_mov_i64(o->out2, o->in2);
3666     return DISAS_NEXT;
3667 }
3668 
3669 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3670 {
3671     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3672     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3673     tcg_temp_free_i32(l);
3674     set_cc_static(s);
3675     return DISAS_NEXT;
3676 }
3677 
3678 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3679 {
3680     tcg_gen_neg_i64(o->out, o->in2);
3681     return DISAS_NEXT;
3682 }
3683 
3684 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3685 {
3686     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3687     return DISAS_NEXT;
3688 }
3689 
3690 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3691 {
3692     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3693     return DISAS_NEXT;
3694 }
3695 
3696 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3697 {
3698     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3699     tcg_gen_mov_i64(o->out2, o->in2);
3700     return DISAS_NEXT;
3701 }
3702 
3703 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3704 {
3705     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3706     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3707     tcg_temp_free_i32(l);
3708     set_cc_static(s);
3709     return DISAS_NEXT;
3710 }
3711 
3712 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3713 {
3714     tcg_gen_or_i64(o->out, o->in1, o->in2);
3715     return DISAS_NEXT;
3716 }
3717 
3718 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3719 {
3720     int shift = s->insn->data & 0xff;
3721     int size = s->insn->data >> 8;
3722     uint64_t mask = ((1ull << size) - 1) << shift;
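         /* insn->data packs (size << 8) | shift, so the mask above covers
            exactly the immediate's target field; only those bits take part
            in the OR and in the CC computed below.  */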
3723 
3724     assert(!o->g_in2);
3725     tcg_gen_shli_i64(o->in2, o->in2, shift);
3726     tcg_gen_or_i64(o->out, o->in1, o->in2);
3727 
3728     /* Produce the CC from only the bits manipulated.  */
3729     tcg_gen_andi_i64(cc_dst, o->out, mask);
3730     set_cc_nz_u64(s, cc_dst);
3731     return DISAS_NEXT;
3732 }
3733 
3734 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3735 {
3736     o->in1 = tcg_temp_new_i64();
3737 
3738     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3739         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3740     } else {
3741         /* Perform the atomic operation in memory. */
3742         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3743                                     s->insn->data);
3744     }
3745 
3746     /* Recompute also for atomic case: needed for setting CC. */
3747     tcg_gen_or_i64(o->out, o->in1, o->in2);
3748 
3749     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3750         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3751     }
3752     return DISAS_NEXT;
3753 }
3754 
3755 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3756 {
3757     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3758     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3759     tcg_temp_free_i32(l);
3760     return DISAS_NEXT;
3761 }
3762 
3763 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3764 {
3765     int l2 = get_field(s, l2) + 1;
3766     TCGv_i32 l;
3767 
3768     /* The length must not exceed 32 bytes.  */
3769     if (l2 > 32) {
3770         gen_program_exception(s, PGM_SPECIFICATION);
3771         return DISAS_NORETURN;
3772     }
3773     l = tcg_const_i32(l2);
3774     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3775     tcg_temp_free_i32(l);
3776     return DISAS_NEXT;
3777 }
3778 
3779 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3780 {
3781     int l2 = get_field(s, l2) + 1;
3782     TCGv_i32 l;
3783 
3784     /* The length must be even and should not exceed 64 bytes.  */
3785     if ((l2 & 1) || (l2 > 64)) {
3786         gen_program_exception(s, PGM_SPECIFICATION);
3787         return DISAS_NORETURN;
3788     }
3789     l = tcg_const_i32(l2);
3790     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3791     tcg_temp_free_i32(l);
3792     return DISAS_NEXT;
3793 }
3794 
3795 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3796 {
3797     const uint8_t m3 = get_field(s, m3);
3798 
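         /* With the miscellaneous-instruction-extensions facility 3, the m3
            bit 0x8 requests a population count of the whole doubleword; the
            base form counts the bits of each byte separately, which needs
            the helper.  */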
3799     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3800         tcg_gen_ctpop_i64(o->out, o->in2);
3801     } else {
3802         gen_helper_popcnt(o->out, o->in2);
3803     }
3804     return DISAS_NEXT;
3805 }
3806 
3807 #ifndef CONFIG_USER_ONLY
3808 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3809 {
3810     gen_helper_ptlb(cpu_env);
3811     return DISAS_NEXT;
3812 }
3813 #endif
3814 
3815 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3816 {
3817     int i3 = get_field(s, i3);
3818     int i4 = get_field(s, i4);
3819     int i5 = get_field(s, i5);
3820     int do_zero = i4 & 0x80;
3821     uint64_t mask, imask, pmask;
3822     int pos, len, rot;
3823 
3824     /* Adjust the arguments for the specific insn.  */
3825     switch (s->fields.op2) {
3826     case 0x55: /* risbg */
3827     case 0x59: /* risbgn */
3828         i3 &= 63;
3829         i4 &= 63;
3830         pmask = ~0;
3831         break;
3832     case 0x5d: /* risbhg */
3833         i3 &= 31;
3834         i4 &= 31;
3835         pmask = 0xffffffff00000000ull;
3836         break;
3837     case 0x51: /* risblg */
3838         i3 = (i3 & 31) + 32;
3839         i4 = (i4 & 31) + 32;
3840         pmask = 0x00000000ffffffffull;
3841         break;
3842     default:
3843         g_assert_not_reached();
3844     }
3845 
3846     /* MASK is the set of bits to be inserted from R2. */
3847     if (i3 <= i4) {
3848         /* [0...i3---i4...63] */
3849         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3850     } else {
3851         /* [0---i4...i3---63] */
3852         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3853     }
3854     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3855     mask &= pmask;
3856 
3857     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3858        insns, we need to keep the other half of the register.  */
3859     imask = ~mask | ~pmask;
3860     if (do_zero) {
3861         imask = ~pmask;
3862     }
3863 
3864     len = i4 - i3 + 1;
3865     pos = 63 - i4;
3866     rot = i5 & 63;
3867 
3868     /* In some cases we can implement this with extract.  */
3869     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3870         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3871         return DISAS_NEXT;
3872     }
3873 
3874     /* In some cases we can implement this with deposit.  */
3875     if (len > 0 && (imask == 0 || ~mask == imask)) {
3876         /* Note that we rotate the bits to be inserted to the lsb, not to
3877            the position as described in the PoO.  */
3878         rot = (rot - pos) & 63;
3879     } else {
3880         pos = -1;
3881     }
3882 
3883     /* Rotate the input as necessary.  */
3884     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3885 
3886     /* Insert the selected bits into the output.  */
3887     if (pos >= 0) {
3888         if (imask == 0) {
3889             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3890         } else {
3891             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3892         }
3893     } else if (imask == 0) {
3894         tcg_gen_andi_i64(o->out, o->in2, mask);
3895     } else {
3896         tcg_gen_andi_i64(o->in2, o->in2, mask);
3897         tcg_gen_andi_i64(o->out, o->out, imask);
3898         tcg_gen_or_i64(o->out, o->out, o->in2);
3899     }
3900     return DISAS_NEXT;
3901 }
3902 
3903 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3904 {
3905     int i3 = get_field(s, i3);
3906     int i4 = get_field(s, i4);
3907     int i5 = get_field(s, i5);
3908     uint64_t mask;
3909 
3910     /* If this is a test-only form, arrange to discard the result.  */
3911     if (i3 & 0x80) {
3912         o->out = tcg_temp_new_i64();
3913         o->g_out = false;
3914     }
3915 
3916     i3 &= 63;
3917     i4 &= 63;
3918     i5 &= 63;
3919 
3920     /* MASK is the set of bits to be operated on from R2.
3921        Take care for I3/I4 wraparound.  */
3922     mask = ~0ull >> i3;
3923     if (i3 <= i4) {
3924         mask ^= ~0ull >> i4 >> 1;
3925     } else {
3926         mask |= ~(~0ull >> i4 >> 1);
3927     }
3928 
3929     /* Rotate the input as necessary.  */
3930     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3931 
3932     /* Operate.  */
3933     switch (s->fields.op2) {
3934     case 0x54: /* AND */
3935         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3936         tcg_gen_and_i64(o->out, o->out, o->in2);
3937         break;
3938     case 0x56: /* OR */
3939         tcg_gen_andi_i64(o->in2, o->in2, mask);
3940         tcg_gen_or_i64(o->out, o->out, o->in2);
3941         break;
3942     case 0x57: /* XOR */
3943         tcg_gen_andi_i64(o->in2, o->in2, mask);
3944         tcg_gen_xor_i64(o->out, o->out, o->in2);
3945         break;
3946     default:
3947         abort();
3948     }
3949 
3950     /* Set the CC.  */
3951     tcg_gen_andi_i64(cc_dst, o->out, mask);
3952     set_cc_nz_u64(s, cc_dst);
3953     return DISAS_NEXT;
3954 }
3955 
3956 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3957 {
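         /* Only the low 16 bits are swapped: TCG_BSWAP_IZ asserts the input
            is already zero-extended, TCG_BSWAP_OZ zero-extends the result.  */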
3958     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3959     return DISAS_NEXT;
3960 }
3961 
3962 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3963 {
3964     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3965     return DISAS_NEXT;
3966 }
3967 
3968 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3969 {
3970     tcg_gen_bswap64_i64(o->out, o->in2);
3971     return DISAS_NEXT;
3972 }
3973 
3974 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3975 {
3976     TCGv_i32 t1 = tcg_temp_new_i32();
3977     TCGv_i32 t2 = tcg_temp_new_i32();
3978     TCGv_i32 to = tcg_temp_new_i32();
3979     tcg_gen_extrl_i64_i32(t1, o->in1);
3980     tcg_gen_extrl_i64_i32(t2, o->in2);
3981     tcg_gen_rotl_i32(to, t1, t2);
3982     tcg_gen_extu_i32_i64(o->out, to);
3983     tcg_temp_free_i32(t1);
3984     tcg_temp_free_i32(t2);
3985     tcg_temp_free_i32(to);
3986     return DISAS_NEXT;
3987 }
3988 
3989 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3990 {
3991     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3992     return DISAS_NEXT;
3993 }
3994 
3995 #ifndef CONFIG_USER_ONLY
3996 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3997 {
3998     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3999     set_cc_static(s);
4000     return DISAS_NEXT;
4001 }
4002 
4003 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
4004 {
4005     gen_helper_sacf(cpu_env, o->in2);
4006     /* Addressing mode has changed, so end the block.  */
4007     return DISAS_TOO_MANY;
4008 }
4009 #endif
4010 
4011 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
4012 {
4013     int sam = s->insn->data;
4014     TCGv_i64 tsam;
4015     uint64_t mask;
4016 
4017     switch (sam) {
4018     case 0:
4019         mask = 0xffffff;
4020         break;
4021     case 1:
4022         mask = 0x7fffffff;
4023         break;
4024     default:
4025         mask = -1;
4026         break;
4027     }
4028 
4029     /* Bizarre but true: we check the address of the current insn for the
4030        specification exception, not the next to be executed.  Thus the PoO
4031        documents that Bad Things Happen two bytes before the end.  */
4032     if (s->base.pc_next & ~mask) {
4033         gen_program_exception(s, PGM_SPECIFICATION);
4034         return DISAS_NORETURN;
4035     }
4036     s->pc_tmp &= mask;
4037 
4038     tsam = tcg_const_i64(sam);
4039     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4040     tcg_temp_free_i64(tsam);
4041 
4042     /* Always exit the TB, since we (may have) changed execution mode.  */
4043     return DISAS_TOO_MANY;
4044 }
4045 
4046 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4047 {
4048     int r1 = get_field(s, r1);
4049     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4050     return DISAS_NEXT;
4051 }
4052 
4053 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4054 {
4055     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4056     return DISAS_NEXT;
4057 }
4058 
4059 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4060 {
4061     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4062     return DISAS_NEXT;
4063 }
4064 
4065 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4066 {
4067     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
4068     return DISAS_NEXT;
4069 }
4070 
4071 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4072 {
4073     gen_helper_sqeb(o->out, cpu_env, o->in2);
4074     return DISAS_NEXT;
4075 }
4076 
4077 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4078 {
4079     gen_helper_sqdb(o->out, cpu_env, o->in2);
4080     return DISAS_NEXT;
4081 }
4082 
4083 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4084 {
4085     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
4086     return DISAS_NEXT;
4087 }
4088 
4089 #ifndef CONFIG_USER_ONLY
4090 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4091 {
4092     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4093     set_cc_static(s);
4094     return DISAS_NEXT;
4095 }
4096 
4097 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4098 {
4099     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4100     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4101     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4102     set_cc_static(s);
4103     tcg_temp_free_i32(r1);
4104     tcg_temp_free_i32(r3);
4105     return DISAS_NEXT;
4106 }
4107 #endif
4108 
4109 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4110 {
4111     DisasCompare c;
4112     TCGv_i64 a, h;
4113     TCGLabel *lab;
4114     int r1;
4115 
4116     disas_jcc(s, &c, get_field(s, m3));
4117 
4118     /* We want to store when the condition is fulfilled, so branch
4119        out when it's not.  */
4120     c.cond = tcg_invert_cond(c.cond);
4121 
4122     lab = gen_new_label();
4123     if (c.is_64) {
4124         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4125     } else {
4126         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4127     }
4128     free_compare(&c);
4129 
4130     r1 = get_field(s, r1);
4131     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4132     switch (s->insn->data) {
4133     case 1: /* STOCG */
4134         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4135         break;
4136     case 0: /* STOC */
4137         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4138         break;
4139     case 2: /* STOCFH */
4140         h = tcg_temp_new_i64();
4141         tcg_gen_shri_i64(h, regs[r1], 32);
4142         tcg_gen_qemu_st32(h, a, get_mem_index(s));
4143         tcg_temp_free_i64(h);
4144         break;
4145     default:
4146         g_assert_not_reached();
4147     }
4148     tcg_temp_free_i64(a);
4149 
4150     gen_set_label(lab);
4151     return DISAS_NEXT;
4152 }
4153 
4154 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4155 {
4156     TCGv_i64 t;
4157     uint64_t sign = 1ull << s->insn->data;
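         /* For the 32-bit form (data == 31), shift the operand into the high
            half so that the CC_OP_SLA computation sees the sign bit at bit 63,
            just as in the 64-bit form.  */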
4158     if (s->insn->data == 31) {
4159         t = tcg_temp_new_i64();
4160         tcg_gen_shli_i64(t, o->in1, 32);
4161     } else {
4162         t = o->in1;
4163     }
4164     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4165     if (s->insn->data == 31) {
4166         tcg_temp_free_i64(t);
4167     }
4168     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4169     /* The arithmetic left shift is curious in that it does not affect
4170        the sign bit.  Copy that over from the source unchanged.  */
4171     tcg_gen_andi_i64(o->out, o->out, ~sign);
4172     tcg_gen_andi_i64(o->in1, o->in1, sign);
4173     tcg_gen_or_i64(o->out, o->out, o->in1);
4174     return DISAS_NEXT;
4175 }
4176 
4177 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4178 {
4179     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4180     return DISAS_NEXT;
4181 }
4182 
4183 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4184 {
4185     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4186     return DISAS_NEXT;
4187 }
4188 
4189 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4190 {
4191     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4192     return DISAS_NEXT;
4193 }
4194 
4195 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4196 {
4197     gen_helper_sfpc(cpu_env, o->in2);
4198     return DISAS_NEXT;
4199 }
4200 
4201 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4202 {
4203     gen_helper_sfas(cpu_env, o->in2);
4204     return DISAS_NEXT;
4205 }
4206 
4207 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4208 {
4209     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4210     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4211     gen_helper_srnm(cpu_env, o->addr1);
4212     return DISAS_NEXT;
4213 }
4214 
4215 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4216 {
4217     /* Bits 0-55 are ignored. */
4218     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4219     gen_helper_srnm(cpu_env, o->addr1);
4220     return DISAS_NEXT;
4221 }
4222 
4223 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4224 {
4225     TCGv_i64 tmp = tcg_temp_new_i64();
4226 
4227     /* Bits other than 61-63 are ignored. */
4228     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4229 
4230     /* No need to call a helper, as we don't implement DFP.  */
4231     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4232     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4233     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4234 
4235     tcg_temp_free_i64(tmp);
4236     return DISAS_NEXT;
4237 }
4238 
4239 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4240 {
4241     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4242     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4243     set_cc_static(s);
4244 
4245     tcg_gen_shri_i64(o->in1, o->in1, 24);
4246     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4247     return DISAS_NEXT;
4248 }
4249 
4250 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4251 {
4252     int b1 = get_field(s, b1);
4253     int d1 = get_field(s, d1);
4254     int b2 = get_field(s, b2);
4255     int d2 = get_field(s, d2);
4256     int r3 = get_field(s, r3);
4257     TCGv_i64 tmp = tcg_temp_new_i64();
4258 
4259     /* fetch all operands first */
4260     o->in1 = tcg_temp_new_i64();
4261     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4262     o->in2 = tcg_temp_new_i64();
4263     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4264     o->addr1 = tcg_temp_new_i64();
4265     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4266 
4267     /* load the third operand into r3 before modifying anything */
4268     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4269 
4270     /* subtract CPU timer from first operand and store in GR0 */
4271     gen_helper_stpt(tmp, cpu_env);
4272     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4273 
4274     /* store second operand in GR1 */
4275     tcg_gen_mov_i64(regs[1], o->in2);
4276 
4277     tcg_temp_free_i64(tmp);
4278     return DISAS_NEXT;
4279 }
4280 
4281 #ifndef CONFIG_USER_ONLY
4282 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4283 {
4284     tcg_gen_shri_i64(o->in2, o->in2, 4);
4285     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4286     return DISAS_NEXT;
4287 }
4288 
4289 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4290 {
4291     gen_helper_sske(cpu_env, o->in1, o->in2);
4292     return DISAS_NEXT;
4293 }
4294 
4295 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4296 {
4297     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4298     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4299     s->exit_to_mainloop = true;
4300     return DISAS_TOO_MANY;
4301 }
4302 
4303 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4304 {
4305     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4306     return DISAS_NEXT;
4307 }
4308 #endif
4309 
4310 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4311 {
4312     gen_helper_stck(o->out, cpu_env);
4313     /* ??? We don't implement clock states.  */
4314     gen_op_movi_cc(s, 0);
4315     return DISAS_NEXT;
4316 }
4317 
4318 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4319 {
4320     TCGv_i64 c1 = tcg_temp_new_i64();
4321     TCGv_i64 c2 = tcg_temp_new_i64();
4322     TCGv_i64 todpr = tcg_temp_new_i64();
4323     gen_helper_stck(c1, cpu_env);
4324     /* 16-bit value stored in a uint32_t (only valid bits set) */
4325     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4326     /* Shift the 64-bit value into its place as a zero-extended
4327        104-bit value.  Note that "bit positions 64-103 are always
4328        non-zero so that they compare differently to STCK"; we set
4329        the least significant bit to 1.  */
4330     tcg_gen_shli_i64(c2, c1, 56);
4331     tcg_gen_shri_i64(c1, c1, 8);
4332     tcg_gen_ori_i64(c2, c2, 0x10000);
4333     tcg_gen_or_i64(c2, c2, todpr);
4334     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4335     tcg_gen_addi_i64(o->in2, o->in2, 8);
4336     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4337     tcg_temp_free_i64(c1);
4338     tcg_temp_free_i64(c2);
4339     tcg_temp_free_i64(todpr);
4340     /* ??? We don't implement clock states.  */
4341     gen_op_movi_cc(s, 0);
4342     return DISAS_NEXT;
4343 }
4344 
4345 #ifndef CONFIG_USER_ONLY
4346 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4347 {
4348     gen_helper_sck(cc_op, cpu_env, o->in2);
4349     set_cc_static(s);
4350     return DISAS_NEXT;
4351 }
4352 
4353 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4354 {
4355     gen_helper_sckc(cpu_env, o->in2);
4356     return DISAS_NEXT;
4357 }
4358 
4359 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4360 {
4361     gen_helper_sckpf(cpu_env, regs[0]);
4362     return DISAS_NEXT;
4363 }
4364 
4365 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4366 {
4367     gen_helper_stckc(o->out, cpu_env);
4368     return DISAS_NEXT;
4369 }
4370 
4371 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4372 {
4373     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4374     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4375     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4376     tcg_temp_free_i32(r1);
4377     tcg_temp_free_i32(r3);
4378     return DISAS_NEXT;
4379 }
4380 
4381 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4382 {
4383     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4384     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4385     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4386     tcg_temp_free_i32(r1);
4387     tcg_temp_free_i32(r3);
4388     return DISAS_NEXT;
4389 }
4390 
4391 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4392 {
4393     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4394     return DISAS_NEXT;
4395 }
4396 
4397 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4398 {
4399     gen_helper_spt(cpu_env, o->in2);
4400     return DISAS_NEXT;
4401 }
4402 
4403 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4404 {
4405     gen_helper_stfl(cpu_env);
4406     return DISAS_NEXT;
4407 }
4408 
4409 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4410 {
4411     gen_helper_stpt(o->out, cpu_env);
4412     return DISAS_NEXT;
4413 }
4414 
4415 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4416 {
4417     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4418     set_cc_static(s);
4419     return DISAS_NEXT;
4420 }
4421 
4422 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4423 {
4424     gen_helper_spx(cpu_env, o->in2);
4425     return DISAS_NEXT;
4426 }
4427 
4428 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4429 {
4430     gen_helper_xsch(cpu_env, regs[1]);
4431     set_cc_static(s);
4432     return DISAS_NEXT;
4433 }
4434 
4435 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4436 {
4437     gen_helper_csch(cpu_env, regs[1]);
4438     set_cc_static(s);
4439     return DISAS_NEXT;
4440 }
4441 
4442 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4443 {
4444     gen_helper_hsch(cpu_env, regs[1]);
4445     set_cc_static(s);
4446     return DISAS_NEXT;
4447 }
4448 
4449 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4450 {
4451     gen_helper_msch(cpu_env, regs[1], o->in2);
4452     set_cc_static(s);
4453     return DISAS_NEXT;
4454 }
4455 
4456 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4457 {
4458     gen_helper_rchp(cpu_env, regs[1]);
4459     set_cc_static(s);
4460     return DISAS_NEXT;
4461 }
4462 
4463 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4464 {
4465     gen_helper_rsch(cpu_env, regs[1]);
4466     set_cc_static(s);
4467     return DISAS_NEXT;
4468 }
4469 
4470 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4471 {
4472     gen_helper_sal(cpu_env, regs[1]);
4473     return DISAS_NEXT;
4474 }
4475 
4476 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4477 {
4478     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4479     return DISAS_NEXT;
4480 }
4481 
4482 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4483 {
4484     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4485     gen_op_movi_cc(s, 3);
4486     return DISAS_NEXT;
4487 }
4488 
4489 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4490 {
4491     /* The instruction is suppressed if not provided. */
4492     return DISAS_NEXT;
4493 }
4494 
4495 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4496 {
4497     gen_helper_ssch(cpu_env, regs[1], o->in2);
4498     set_cc_static(s);
4499     return DISAS_NEXT;
4500 }
4501 
4502 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4503 {
4504     gen_helper_stsch(cpu_env, regs[1], o->in2);
4505     set_cc_static(s);
4506     return DISAS_NEXT;
4507 }
4508 
4509 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4510 {
4511     gen_helper_stcrw(cpu_env, o->in2);
4512     set_cc_static(s);
4513     return DISAS_NEXT;
4514 }
4515 
4516 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4517 {
4518     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4519     set_cc_static(s);
4520     return DISAS_NEXT;
4521 }
4522 
4523 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4524 {
4525     gen_helper_tsch(cpu_env, regs[1], o->in2);
4526     set_cc_static(s);
4527     return DISAS_NEXT;
4528 }
4529 
4530 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4531 {
4532     gen_helper_chsc(cpu_env, o->in2);
4533     set_cc_static(s);
4534     return DISAS_NEXT;
4535 }
4536 
4537 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4538 {
4539     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4540     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4541     return DISAS_NEXT;
4542 }
4543 
4544 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4545 {
4546     uint64_t i2 = get_field(s, i2);
4547     TCGv_i64 t;
4548 
4549     /* It is important to do what the instruction name says: STORE THEN.
4550        If we let the output hook perform the store then if we fault and
4551        restart, we'll have the wrong SYSTEM MASK in place.  */
4552     t = tcg_temp_new_i64();
4553     tcg_gen_shri_i64(t, psw_mask, 56);
4554     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4555     tcg_temp_free_i64(t);
4556 
4557     if (s->fields.op == 0xac) {
4558         tcg_gen_andi_i64(psw_mask, psw_mask,
4559                          (i2 << 56) | 0x00ffffffffffffffull);
4560     } else {
4561         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4562     }
4563 
4564     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4565     s->exit_to_mainloop = true;
4566     return DISAS_TOO_MANY;
4567 }
4568 
4569 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4570 {
4571     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4572 
4573     if (s->base.tb->flags & FLAG_MASK_PER) {
4574         update_psw_addr(s);
4575         gen_helper_per_store_real(cpu_env);
4576     }
4577     return DISAS_NEXT;
4578 }
4579 #endif
4580 
4581 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4582 {
4583     gen_helper_stfle(cc_op, cpu_env, o->in2);
4584     set_cc_static(s);
4585     return DISAS_NEXT;
4586 }
4587 
4588 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4589 {
4590     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4591     return DISAS_NEXT;
4592 }
4593 
4594 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4595 {
4596     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4597     return DISAS_NEXT;
4598 }
4599 
4600 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4601 {
4602     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4603     return DISAS_NEXT;
4604 }
4605 
4606 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4607 {
4608     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4609     return DISAS_NEXT;
4610 }
4611 
4612 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4613 {
4614     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4615     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4616     gen_helper_stam(cpu_env, r1, o->in2, r3);
4617     tcg_temp_free_i32(r1);
4618     tcg_temp_free_i32(r3);
4619     return DISAS_NEXT;
4620 }
4621 
4622 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4623 {
4624     int m3 = get_field(s, m3);
4625     int pos, base = s->insn->data;
4626     TCGv_i64 tmp = tcg_temp_new_i64();
4627 
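         /* ctz32(m3) locates the least significant selected byte; for the
            contiguous masks handled below this yields the shift that brings
            the field down for a single wider store.  */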
4628     pos = base + ctz32(m3) * 8;
4629     switch (m3) {
4630     case 0xf:
4631         /* Effectively a 32-bit store.  */
4632         tcg_gen_shri_i64(tmp, o->in1, pos);
4633         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4634         break;
4635 
4636     case 0xc:
4637     case 0x6:
4638     case 0x3:
4639         /* Effectively a 16-bit store.  */
4640         tcg_gen_shri_i64(tmp, o->in1, pos);
4641         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4642         break;
4643 
4644     case 0x8:
4645     case 0x4:
4646     case 0x2:
4647     case 0x1:
4648         /* Effectively an 8-bit store.  */
4649         tcg_gen_shri_i64(tmp, o->in1, pos);
4650         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4651         break;
4652 
4653     default:
4654         /* This is going to be a sequence of shifts and stores.  */
4655         pos = base + 32 - 8;
4656         while (m3) {
4657             if (m3 & 0x8) {
4658                 tcg_gen_shri_i64(tmp, o->in1, pos);
4659                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4660                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4661             }
4662             m3 = (m3 << 1) & 0xf;
4663             pos -= 8;
4664         }
4665         break;
4666     }
4667     tcg_temp_free_i64(tmp);
4668     return DISAS_NEXT;
4669 }
4670 
4671 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4672 {
4673     int r1 = get_field(s, r1);
4674     int r3 = get_field(s, r3);
4675     int size = s->insn->data;
4676     TCGv_i64 tsize = tcg_const_i64(size);
4677 
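         /* Store registers r1 through r3 inclusive, wrapping from 15 back
            to 0, advancing the address by the element size between stores.  */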
4678     while (1) {
4679         if (size == 8) {
4680             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4681         } else {
4682             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4683         }
4684         if (r1 == r3) {
4685             break;
4686         }
4687         tcg_gen_add_i64(o->in2, o->in2, tsize);
4688         r1 = (r1 + 1) & 15;
4689     }
4690 
4691     tcg_temp_free_i64(tsize);
4692     return DISAS_NEXT;
4693 }
4694 
4695 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4696 {
4697     int r1 = get_field(s, r1);
4698     int r3 = get_field(s, r3);
4699     TCGv_i64 t = tcg_temp_new_i64();
4700     TCGv_i64 t4 = tcg_const_i64(4);
4701     TCGv_i64 t32 = tcg_const_i64(32);
4702 
4703     while (1) {
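             /* STMH stores bits 0-31, i.e. the high half of each register;
                shift it down into the low word consumed by the 32-bit store.  */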
4704         tcg_gen_shr_i64(t, regs[r1], t32);
4705         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4706         if (r1 == r3) {
4707             break;
4708         }
4709         tcg_gen_add_i64(o->in2, o->in2, t4);
4710         r1 = (r1 + 1) & 15;
4711     }
4712 
4713     tcg_temp_free_i64(t);
4714     tcg_temp_free_i64(t4);
4715     tcg_temp_free_i64(t32);
4716     return DISAS_NEXT;
4717 }
4718 
4719 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4720 {
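         /* The 16-byte store must be atomic when translating for a parallel
            context; if the host lacks 128-bit atomics, punt to the exclusive
            single-stepping loop via exit_atomic.  */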
4721     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4722         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4723     } else if (HAVE_ATOMIC128) {
4724         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4725     } else {
4726         gen_helper_exit_atomic(cpu_env);
4727         return DISAS_NORETURN;
4728     }
4729     return DISAS_NEXT;
4730 }
4731 
4732 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4733 {
4734     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4735     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4736 
4737     gen_helper_srst(cpu_env, r1, r2);
4738 
4739     tcg_temp_free_i32(r1);
4740     tcg_temp_free_i32(r2);
4741     set_cc_static(s);
4742     return DISAS_NEXT;
4743 }
4744 
4745 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4746 {
4747     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4748     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4749 
4750     gen_helper_srstu(cpu_env, r1, r2);
4751 
4752     tcg_temp_free_i32(r1);
4753     tcg_temp_free_i32(r2);
4754     set_cc_static(s);
4755     return DISAS_NEXT;
4756 }
4757 
4758 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4759 {
4760     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4761     return DISAS_NEXT;
4762 }
4763 
4764 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4765 {
4766     tcg_gen_movi_i64(cc_src, 0);
4767     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4768     return DISAS_NEXT;
4769 }
4770 
4771 /* Compute borrow (0, -1) into cc_src. */
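     /* Note the deliberate fall-through chain below: a dynamic CC is first
        materialized, the carry is then extracted from its msb, and finally
        the carry (0,1) is converted into a borrow (0,-1).  */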
4772 static void compute_borrow(DisasContext *s)
4773 {
4774     switch (s->cc_op) {
4775     case CC_OP_SUBU:
4776         /* The borrow value is already in cc_src (0,-1). */
4777         break;
4778     default:
4779         gen_op_calc_cc(s);
4780         /* fall through */
4781     case CC_OP_STATIC:
4782         /* The carry flag is the msb of CC; compute into cc_src. */
4783         tcg_gen_extu_i32_i64(cc_src, cc_op);
4784         tcg_gen_shri_i64(cc_src, cc_src, 1);
4785         /* fall through */
4786     case CC_OP_ADDU:
4787         /* Convert carry (1,0) to borrow (0,-1). */
4788         tcg_gen_subi_i64(cc_src, cc_src, 1);
4789         break;
4790     }
4791 }
4792 
4793 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4794 {
4795     compute_borrow(s);
4796 
4797     /* Borrow is {0, -1}, so add to subtract. */
4798     tcg_gen_add_i64(o->out, o->in1, cc_src);
4799     tcg_gen_sub_i64(o->out, o->out, o->in2);
4800     return DISAS_NEXT;
4801 }
4802 
4803 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4804 {
4805     compute_borrow(s);
4806 
4807     /*
4808      * Borrow is {0, -1}, so add to subtract; replicate the
4809      * borrow input to produce 128-bit -1 for the addition.
4810      */
4811     TCGv_i64 zero = tcg_const_i64(0);
4812     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4813     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4814     tcg_temp_free_i64(zero);
4815 
4816     return DISAS_NEXT;
4817 }
4818 
4819 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4820 {
4821     TCGv_i32 t;
4822 
4823     update_psw_addr(s);
4824     update_cc_op(s);
4825 
4826     t = tcg_const_i32(get_field(s, i1) & 0xff);
4827     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4828     tcg_temp_free_i32(t);
4829 
4830     t = tcg_const_i32(s->ilen);
4831     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4832     tcg_temp_free_i32(t);
4833 
4834     gen_exception(EXCP_SVC);
4835     return DISAS_NORETURN;
4836 }
4837 
4838 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4839 {
4840     int cc = 0;
4841 
4842     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4843     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4844     gen_op_movi_cc(s, cc);
4845     return DISAS_NEXT;
4846 }
4847 
4848 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4849 {
4850     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4851     set_cc_static(s);
4852     return DISAS_NEXT;
4853 }
4854 
4855 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4856 {
4857     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4858     set_cc_static(s);
4859     return DISAS_NEXT;
4860 }
4861 
4862 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4863 {
4864     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4865     set_cc_static(s);
4866     return DISAS_NEXT;
4867 }
4868 
4869 #ifndef CONFIG_USER_ONLY
4870 
4871 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4872 {
4873     gen_helper_testblock(cc_op, cpu_env, o->in2);
4874     set_cc_static(s);
4875     return DISAS_NEXT;
4876 }
4877 
4878 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4879 {
4880     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4881     set_cc_static(s);
4882     return DISAS_NEXT;
4883 }
4884 
4885 #endif
4886 
4887 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4888 {
4889     TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4890     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4891     tcg_temp_free_i32(l1);
4892     set_cc_static(s);
4893     return DISAS_NEXT;
4894 }
4895 
4896 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4897 {
4898     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4899     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4900     tcg_temp_free_i32(l);
4901     set_cc_static(s);
4902     return DISAS_NEXT;
4903 }
4904 
4905 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4906 {
4907     TCGv_i128 pair = tcg_temp_new_i128();
4908 
4909     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4910     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4911     tcg_temp_free_i128(pair);
4912     set_cc_static(s);
4913     return DISAS_NEXT;
4914 }
4915 
4916 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4917 {
4918     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4919     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4920     tcg_temp_free_i32(l);
4921     set_cc_static(s);
4922     return DISAS_NEXT;
4923 }
4924 
4925 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4926 {
4927     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4928     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4929     tcg_temp_free_i32(l);
4930     set_cc_static(s);
4931     return DISAS_NEXT;
4932 }
4933 
4934 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4935 {
4936     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4937     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4938     TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4939     TCGv_i32 tst = tcg_temp_new_i32();
4940     int m3 = get_field(s, m3);
4941 
4942     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4943         m3 = 0;
4944     }
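         /* The m3 bit 0x1 selects the form without a test character: tst = -1
            cannot match any zero-extended source character.  Otherwise the
            test character is taken from regs[0], truncated to the character
            width selected by the low opcode bits.  */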
4945     if (m3 & 1) {
4946         tcg_gen_movi_i32(tst, -1);
4947     } else {
4948         tcg_gen_extrl_i64_i32(tst, regs[0]);
4949         if (s->insn->opc & 3) {
4950             tcg_gen_ext8u_i32(tst, tst);
4951         } else {
4952             tcg_gen_ext16u_i32(tst, tst);
4953         }
4954     }
4955     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4956 
4957     tcg_temp_free_i32(r1);
4958     tcg_temp_free_i32(r2);
4959     tcg_temp_free_i32(sizes);
4960     tcg_temp_free_i32(tst);
4961     set_cc_static(s);
4962     return DISAS_NEXT;
4963 }
4964 
4965 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4966 {
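         /* TEST AND SET: atomically replace the byte with all ones and set
            the CC from its original leftmost bit (host bit 7).  */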
4967     TCGv_i32 t1 = tcg_const_i32(0xff);
4968     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4969     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4970     tcg_temp_free_i32(t1);
4971     set_cc_static(s);
4972     return DISAS_NEXT;
4973 }
4974 
4975 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4976 {
4977     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4978     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4979     tcg_temp_free_i32(l);
4980     return DISAS_NEXT;
4981 }
4982 
4983 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4984 {
4985     int l1 = get_field(s, l1) + 1;
4986     TCGv_i32 l;
4987 
4988     /* The length must not exceed 32 bytes.  */
4989     if (l1 > 32) {
4990         gen_program_exception(s, PGM_SPECIFICATION);
4991         return DISAS_NORETURN;
4992     }
4993     l = tcg_const_i32(l1);
4994     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4995     tcg_temp_free_i32(l);
4996     set_cc_static(s);
4997     return DISAS_NEXT;
4998 }
4999 
5000 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
5001 {
5002     int l1 = get_field(s, l1) + 1;
5003     TCGv_i32 l;
5004 
5005     /* The length must be even and should not exceed 64 bytes.  */
5006     if ((l1 & 1) || (l1 > 64)) {
5007         gen_program_exception(s, PGM_SPECIFICATION);
5008         return DISAS_NORETURN;
5009     }
5010     l = tcg_const_i32(l1);
5011     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
5012     tcg_temp_free_i32(l);
5013     set_cc_static(s);
5014     return DISAS_NEXT;
5015 }
5016 
5017 
5018 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
5019 {
5020     int d1 = get_field(s, d1);
5021     int d2 = get_field(s, d2);
5022     int b1 = get_field(s, b1);
5023     int b2 = get_field(s, b2);
5024     int l = get_field(s, l1);
5025     TCGv_i32 t32;
5026 
5027     o->addr1 = get_address(s, 0, b1, d1);
5028 
5029     /* If the addresses are identical, this is a store/memset of zero.  */
5030     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
5031         o->in2 = tcg_const_i64(0);
5032 
5033         l++;
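             /* Unroll the clear into 8-byte zero stores plus at most one
                each of 4-, 2- and 1-byte stores for the remainder.  */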
5034         while (l >= 8) {
5035             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
5036             l -= 8;
5037             if (l > 0) {
5038                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
5039             }
5040         }
5041         if (l >= 4) {
5042             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
5043             l -= 4;
5044             if (l > 0) {
5045                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
5046             }
5047         }
5048         if (l >= 2) {
5049             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
5050             l -= 2;
5051             if (l > 0) {
5052                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
5053             }
5054         }
5055         if (l) {
5056             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
5057         }
5058         gen_op_movi_cc(s, 0);
5059         return DISAS_NEXT;
5060     }
5061 
5062     /* But in general we'll defer to a helper.  */
5063     o->in2 = get_address(s, 0, b2, d2);
5064     t32 = tcg_const_i32(l);
5065     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5066     tcg_temp_free_i32(t32);
5067     set_cc_static(s);
5068     return DISAS_NEXT;
5069 }
5070 
5071 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5072 {
5073     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5074     return DISAS_NEXT;
5075 }
5076 
5077 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5078 {
5079     int shift = s->insn->data & 0xff;
5080     int size = s->insn->data >> 8;
5081     uint64_t mask = ((1ull << size) - 1) << shift;
5082 
5083     assert(!o->g_in2);
5084     tcg_gen_shli_i64(o->in2, o->in2, shift);
5085     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5086 
5087     /* Produce the CC from only the bits manipulated.  */
5088     tcg_gen_andi_i64(cc_dst, o->out, mask);
5089     set_cc_nz_u64(s, cc_dst);
5090     return DISAS_NEXT;
5091 }
5092 
5093 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5094 {
5095     o->in1 = tcg_temp_new_i64();
5096 
5097     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5098         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5099     } else {
5100         /* Perform the atomic operation in memory. */
5101         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5102                                      s->insn->data);
5103     }
5104 
5105     /* Recompute also for atomic case: needed for setting CC. */
5106     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5107 
5108     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5109         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5110     }
5111     return DISAS_NEXT;
5112 }
5113 
5114 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5115 {
5116     o->out = tcg_const_i64(0);
5117     return DISAS_NEXT;
5118 }
5119 
5120 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5121 {
5122     o->out = tcg_const_i64(0);
5123     o->out2 = o->out;
5124     o->g_out2 = true;
5125     return DISAS_NEXT;
5126 }
5127 
5128 #ifndef CONFIG_USER_ONLY
5129 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5130 {
5131     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5132 
5133     gen_helper_clp(cpu_env, r2);
5134     tcg_temp_free_i32(r2);
5135     set_cc_static(s);
5136     return DISAS_NEXT;
5137 }
5138 
5139 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5140 {
5141     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5142     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5143 
5144     gen_helper_pcilg(cpu_env, r1, r2);
5145     tcg_temp_free_i32(r1);
5146     tcg_temp_free_i32(r2);
5147     set_cc_static(s);
5148     return DISAS_NEXT;
5149 }
5150 
5151 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5152 {
5153     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5154     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5155 
5156     gen_helper_pcistg(cpu_env, r1, r2);
5157     tcg_temp_free_i32(r1);
5158     tcg_temp_free_i32(r2);
5159     set_cc_static(s);
5160     return DISAS_NEXT;
5161 }
5162 
5163 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5164 {
5165     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5166     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5167 
5168     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5169     tcg_temp_free_i32(ar);
5170     tcg_temp_free_i32(r1);
5171     set_cc_static(s);
5172     return DISAS_NEXT;
5173 }
5174 
5175 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5176 {
5177     gen_helper_sic(cpu_env, o->in1, o->in2);
5178     return DISAS_NEXT;
5179 }
5180 
5181 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5182 {
5183     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5184     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5185 
5186     gen_helper_rpcit(cpu_env, r1, r2);
5187     tcg_temp_free_i32(r1);
5188     tcg_temp_free_i32(r2);
5189     set_cc_static(s);
5190     return DISAS_NEXT;
5191 }
5192 
5193 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5194 {
5195     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5196     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5197     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5198 
5199     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5200     tcg_temp_free_i32(ar);
5201     tcg_temp_free_i32(r1);
5202     tcg_temp_free_i32(r3);
5203     set_cc_static(s);
5204     return DISAS_NEXT;
5205 }
5206 
5207 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5208 {
5209     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5210     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5211 
5212     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5213     tcg_temp_free_i32(ar);
5214     tcg_temp_free_i32(r1);
5215     set_cc_static(s);
5216     return DISAS_NEXT;
5217 }
5218 #endif
5219 
5220 #include "translate_vx.c.inc"
5221 
5222 /* ====================================================================== */
5223 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5224    the original inputs), update the various cc data structures in order to
5225    be able to compute the new condition code.  */
5226 
5227 static void cout_abs32(DisasContext *s, DisasOps *o)
5228 {
5229     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5230 }
5231 
5232 static void cout_abs64(DisasContext *s, DisasOps *o)
5233 {
5234     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5235 }
5236 
5237 static void cout_adds32(DisasContext *s, DisasOps *o)
5238 {
5239     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5240 }
5241 
5242 static void cout_adds64(DisasContext *s, DisasOps *o)
5243 {
5244     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5245 }
5246 
5247 static void cout_addu32(DisasContext *s, DisasOps *o)
5248 {
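         /* The 32-bit logical add was performed in 64-bit arithmetic, so the
            carry sits in bit 32 of the result; split it into the (carry,
            result) pair that CC_OP_ADDU expects.  */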
5249     tcg_gen_shri_i64(cc_src, o->out, 32);
5250     tcg_gen_ext32u_i64(cc_dst, o->out);
5251     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5252 }
5253 
5254 static void cout_addu64(DisasContext *s, DisasOps *o)
5255 {
5256     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5257 }
5258 
5259 static void cout_cmps32(DisasContext *s, DisasOps *o)
5260 {
5261     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5262 }
5263 
5264 static void cout_cmps64(DisasContext *s, DisasOps *o)
5265 {
5266     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5267 }
5268 
5269 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5270 {
5271     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5272 }
5273 
5274 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5275 {
5276     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5277 }
5278 
5279 static void cout_f32(DisasContext *s, DisasOps *o)
5280 {
5281     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5282 }
5283 
5284 static void cout_f64(DisasContext *s, DisasOps *o)
5285 {
5286     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5287 }
5288 
5289 static void cout_f128(DisasContext *s, DisasOps *o)
5290 {
5291     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5292 }
5293 
5294 static void cout_nabs32(DisasContext *s, DisasOps *o)
5295 {
5296     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5297 }
5298 
5299 static void cout_nabs64(DisasContext *s, DisasOps *o)
5300 {
5301     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5302 }
5303 
5304 static void cout_neg32(DisasContext *s, DisasOps *o)
5305 {
5306     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5307 }
5308 
5309 static void cout_neg64(DisasContext *s, DisasOps *o)
5310 {
5311     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5312 }
5313 
5314 static void cout_nz32(DisasContext *s, DisasOps *o)
5315 {
5316     tcg_gen_ext32u_i64(cc_dst, o->out);
5317     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5318 }
5319 
5320 static void cout_nz64(DisasContext *s, DisasOps *o)
5321 {
5322     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5323 }
5324 
5325 static void cout_s32(DisasContext *s, DisasOps *o)
5326 {
5327     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5328 }
5329 
5330 static void cout_s64(DisasContext *s, DisasOps *o)
5331 {
5332     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5333 }
5334 
5335 static void cout_subs32(DisasContext *s, DisasOps *o)
5336 {
5337     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5338 }
5339 
5340 static void cout_subs64(DisasContext *s, DisasOps *o)
5341 {
5342     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5343 }
5344 
5345 static void cout_subu32(DisasContext *s, DisasOps *o)
5346 {
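         /* A 32-bit logical subtract performed in 64-bit arithmetic leaves
            all-ones in the upper half exactly when a borrow occurred; the
            arithmetic shift yields the (0,-1) borrow flag CC_OP_SUBU expects.  */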
5347     tcg_gen_sari_i64(cc_src, o->out, 32);
5348     tcg_gen_ext32u_i64(cc_dst, o->out);
5349     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5350 }
5351 
5352 static void cout_subu64(DisasContext *s, DisasOps *o)
5353 {
5354     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5355 }
5356 
5357 static void cout_tm32(DisasContext *s, DisasOps *o)
5358 {
5359     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5360 }
5361 
5362 static void cout_tm64(DisasContext *s, DisasOps *o)
5363 {
5364     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5365 }
5366 
5367 static void cout_muls32(DisasContext *s, DisasOps *o)
5368 {
5369     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5370 }
5371 
5372 static void cout_muls64(DisasContext *s, DisasOps *o)
5373 {
5374     /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5375     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5376 }
5377 
5378 /* ====================================================================== */
5379 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5380    with the TCG register to which we will write.  Used in combination with
5381    the "wout" generators, in some cases we need a new temporary, and in
5382    some cases we can write to a TCG global.  */
5383 
5384 static void prep_new(DisasContext *s, DisasOps *o)
5385 {
5386     o->out = tcg_temp_new_i64();
5387 }
5388 #define SPEC_prep_new 0
5389 
5390 static void prep_new_P(DisasContext *s, DisasOps *o)
5391 {
5392     o->out = tcg_temp_new_i64();
5393     o->out2 = tcg_temp_new_i64();
5394 }
5395 #define SPEC_prep_new_P 0
5396 
5397 static void prep_new_x(DisasContext *s, DisasOps *o)
5398 {
5399     o->out_128 = tcg_temp_new_i128();
5400 }
5401 #define SPEC_prep_new_x 0
5402 
5403 static void prep_r1(DisasContext *s, DisasOps *o)
5404 {
5405     o->out = regs[get_field(s, r1)];
5406     o->g_out = true;
5407 }
5408 #define SPEC_prep_r1 0
5409 
5410 static void prep_r1_P(DisasContext *s, DisasOps *o)
5411 {
5412     int r1 = get_field(s, r1);
5413     o->out = regs[r1];
5414     o->out2 = regs[r1 + 1];
5415     o->g_out = o->g_out2 = true;
5416 }
5417 #define SPEC_prep_r1_P SPEC_r1_even
5418 
5419 static void prep_x1(DisasContext *s, DisasOps *o)
5420 {
5421     o->out_128 = load_freg_128(get_field(s, r1));
5422 }
5423 #define SPEC_prep_x1 SPEC_r1_f128
5424 
5425 /* ====================================================================== */
5426 /* The "Write OUTput" generators.  These generally perform some non-trivial
5427    copy of data to TCG globals, or to main memory.  The trivial cases are
5428    instead handled by having a "prep" generator install the TCG global
5429    as the destination of the operation.  */
5430 
5431 static void wout_r1(DisasContext *s, DisasOps *o)
5432 {
5433     store_reg(get_field(s, r1), o->out);
5434 }
5435 #define SPEC_wout_r1 0
5436 
5437 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5438 {
5439     store_reg(get_field(s, r1), o->out2);
5440 }
5441 #define SPEC_wout_out2_r1 0
5442 
5443 static void wout_r1_8(DisasContext *s, DisasOps *o)
5444 {
5445     int r1 = get_field(s, r1);
5446     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5447 }
5448 #define SPEC_wout_r1_8 0
5449 
5450 static void wout_r1_16(DisasContext *s, DisasOps *o)
5451 {
5452     int r1 = get_field(s, r1);
5453     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5454 }
5455 #define SPEC_wout_r1_16 0
5456 
5457 static void wout_r1_32(DisasContext *s, DisasOps *o)
5458 {
5459     store_reg32_i64(get_field(s, r1), o->out);
5460 }
5461 #define SPEC_wout_r1_32 0
5462 
5463 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5464 {
5465     store_reg32h_i64(get_field(s, r1), o->out);
5466 }
5467 #define SPEC_wout_r1_32h 0
5468 
5469 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5470 {
5471     int r1 = get_field(s, r1);
5472     store_reg32_i64(r1, o->out);
5473     store_reg32_i64(r1 + 1, o->out2);
5474 }
5475 #define SPEC_wout_r1_P32 SPEC_r1_even
5476 
5477 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5478 {
5479     int r1 = get_field(s, r1);
5480     TCGv_i64 t = tcg_temp_new_i64();
5481     store_reg32_i64(r1 + 1, o->out);
5482     tcg_gen_shri_i64(t, o->out, 32);
5483     store_reg32_i64(r1, t);
5484     tcg_temp_free_i64(t);
5485 }
5486 #define SPEC_wout_r1_D32 SPEC_r1_even
5487 
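/*
 * Editor's note, a worked example: the D32 convention splits a 64-bit
 * value across the even/odd pair r1/r1+1.  With r1 = 2 and
 * o->out = 0x1122334455667788, the stores above leave the low 32 bits of
 * r3 = 0x55667788 and the low 32 bits of r2 = 0x11223344.  The
 * in?_r?_D32 loaders below undo this with tcg_gen_concat32_i64.
 */
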
5488 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5489 {
5490     int r1 = get_field(s, r1);
5491     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5492 }
5493 #define SPEC_wout_r1_D64 SPEC_r1_even
5494 
5495 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5496 {
5497     int r3 = get_field(s, r3);
5498     store_reg32_i64(r3, o->out);
5499     store_reg32_i64(r3 + 1, o->out2);
5500 }
5501 #define SPEC_wout_r3_P32 SPEC_r3_even
5502 
5503 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5504 {
5505     int r3 = get_field(s, r3);
5506     store_reg(r3, o->out);
5507     store_reg(r3 + 1, o->out2);
5508 }
5509 #define SPEC_wout_r3_P64 SPEC_r3_even
5510 
5511 static void wout_e1(DisasContext *s, DisasOps *o)
5512 {
5513     store_freg32_i64(get_field(s, r1), o->out);
5514 }
5515 #define SPEC_wout_e1 0
5516 
5517 static void wout_f1(DisasContext *s, DisasOps *o)
5518 {
5519     store_freg(get_field(s, r1), o->out);
5520 }
5521 #define SPEC_wout_f1 0
5522 
5523 static void wout_x1(DisasContext *s, DisasOps *o)
5524 {
5525     int f1 = get_field(s, r1);
5526 
5527     /* Split out_128 into out+out2 for cout_f128. */
5528     tcg_debug_assert(o->out == NULL);
5529     o->out = tcg_temp_new_i64();
5530     o->out2 = tcg_temp_new_i64();
5531 
5532     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5533     store_freg(f1, o->out);
5534     store_freg(f1 + 2, o->out2);
5535 }
5536 #define SPEC_wout_x1 SPEC_r1_f128
5537 
5538 static void wout_x1_P(DisasContext *s, DisasOps *o)
5539 {
5540     int f1 = get_field(s, r1);
5541     store_freg(f1, o->out);
5542     store_freg(f1 + 2, o->out2);
5543 }
5544 #define SPEC_wout_x1_P SPEC_r1_f128
5545 
5546 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5547 {
5548     if (get_field(s, r1) != get_field(s, r2)) {
5549         store_reg32_i64(get_field(s, r1), o->out);
5550     }
5551 }
5552 #define SPEC_wout_cond_r1r2_32 0
5553 
5554 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5555 {
5556     if (get_field(s, r1) != get_field(s, r2)) {
5557         store_freg32_i64(get_field(s, r1), o->out);
5558     }
5559 }
5560 #define SPEC_wout_cond_e1e2 0
5561 
5562 static void wout_m1_8(DisasContext *s, DisasOps *o)
5563 {
5564     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5565 }
5566 #define SPEC_wout_m1_8 0
5567 
5568 static void wout_m1_16(DisasContext *s, DisasOps *o)
5569 {
5570     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5571 }
5572 #define SPEC_wout_m1_16 0
5573 
5574 #ifndef CONFIG_USER_ONLY
5575 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5576 {
5577     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5578 }
5579 #define SPEC_wout_m1_16a 0
5580 #endif
5581 
5582 static void wout_m1_32(DisasContext *s, DisasOps *o)
5583 {
5584     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5585 }
5586 #define SPEC_wout_m1_32 0
5587 
5588 #ifndef CONFIG_USER_ONLY
5589 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5590 {
5591     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5592 }
5593 #define SPEC_wout_m1_32a 0
5594 #endif
5595 
5596 static void wout_m1_64(DisasContext *s, DisasOps *o)
5597 {
5598     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5599 }
5600 #define SPEC_wout_m1_64 0
5601 
5602 #ifndef CONFIG_USER_ONLY
5603 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5604 {
5605     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5606 }
5607 #define SPEC_wout_m1_64a 0
5608 #endif
5609 
5610 static void wout_m2_32(DisasContext *s, DisasOps *o)
5611 {
5612     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5613 }
5614 #define SPEC_wout_m2_32 0
5615 
5616 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5617 {
5618     store_reg(get_field(s, r1), o->in2);
5619 }
5620 #define SPEC_wout_in2_r1 0
5621 
5622 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5623 {
5624     store_reg32_i64(get_field(s, r1), o->in2);
5625 }
5626 #define SPEC_wout_in2_r1_32 0
5627 
5628 /* ====================================================================== */
5629 /* The "INput 1" generators.  These load the first operand to an insn.  */
5630 
5631 static void in1_r1(DisasContext *s, DisasOps *o)
5632 {
5633     o->in1 = load_reg(get_field(s, r1));
5634 }
5635 #define SPEC_in1_r1 0
5636 
5637 static void in1_r1_o(DisasContext *s, DisasOps *o)
5638 {
5639     o->in1 = regs[get_field(s, r1)];
5640     o->g_in1 = true;
5641 }
5642 #define SPEC_in1_r1_o 0
5643 
5644 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5645 {
5646     o->in1 = tcg_temp_new_i64();
5647     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5648 }
5649 #define SPEC_in1_r1_32s 0
5650 
5651 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5652 {
5653     o->in1 = tcg_temp_new_i64();
5654     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5655 }
5656 #define SPEC_in1_r1_32u 0
5657 
5658 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5659 {
5660     o->in1 = tcg_temp_new_i64();
5661     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5662 }
5663 #define SPEC_in1_r1_sr32 0
5664 
5665 static void in1_r1p1(DisasContext *s, DisasOps *o)
5666 {
5667     o->in1 = load_reg(get_field(s, r1) + 1);
5668 }
5669 #define SPEC_in1_r1p1 SPEC_r1_even
5670 
5671 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5672 {
5673     o->in1 = regs[get_field(s, r1) + 1];
5674     o->g_in1 = true;
5675 }
5676 #define SPEC_in1_r1p1_o SPEC_r1_even
5677 
5678 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5679 {
5680     o->in1 = tcg_temp_new_i64();
5681     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5682 }
5683 #define SPEC_in1_r1p1_32s SPEC_r1_even
5684 
5685 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5686 {
5687     o->in1 = tcg_temp_new_i64();
5688     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5689 }
5690 #define SPEC_in1_r1p1_32u SPEC_r1_even
5691 
5692 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5693 {
5694     int r1 = get_field(s, r1);
5695     o->in1 = tcg_temp_new_i64();
5696     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5697 }
5698 #define SPEC_in1_r1_D32 SPEC_r1_even
5699 
5700 static void in1_r2(DisasContext *s, DisasOps *o)
5701 {
5702     o->in1 = load_reg(get_field(s, r2));
5703 }
5704 #define SPEC_in1_r2 0
5705 
5706 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5707 {
5708     o->in1 = tcg_temp_new_i64();
5709     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5710 }
5711 #define SPEC_in1_r2_sr32 0
5712 
5713 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5714 {
5715     o->in1 = tcg_temp_new_i64();
5716     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5717 }
5718 #define SPEC_in1_r2_32u 0
5719 
5720 static void in1_r3(DisasContext *s, DisasOps *o)
5721 {
5722     o->in1 = load_reg(get_field(s, r3));
5723 }
5724 #define SPEC_in1_r3 0
5725 
5726 static void in1_r3_o(DisasContext *s, DisasOps *o)
5727 {
5728     o->in1 = regs[get_field(s, r3)];
5729     o->g_in1 = true;
5730 }
5731 #define SPEC_in1_r3_o 0
5732 
5733 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5734 {
5735     o->in1 = tcg_temp_new_i64();
5736     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5737 }
5738 #define SPEC_in1_r3_32s 0
5739 
5740 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5741 {
5742     o->in1 = tcg_temp_new_i64();
5743     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5744 }
5745 #define SPEC_in1_r3_32u 0
5746 
5747 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5748 {
5749     int r3 = get_field(s, r3);
5750     o->in1 = tcg_temp_new_i64();
5751     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5752 }
5753 #define SPEC_in1_r3_D32 SPEC_r3_even
5754 
5755 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5756 {
5757     o->in1 = tcg_temp_new_i64();
5758     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5759 }
5760 #define SPEC_in1_r3_sr32 0
5761 
5762 static void in1_e1(DisasContext *s, DisasOps *o)
5763 {
5764     o->in1 = load_freg32_i64(get_field(s, r1));
5765 }
5766 #define SPEC_in1_e1 0
5767 
5768 static void in1_f1(DisasContext *s, DisasOps *o)
5769 {
5770     o->in1 = load_freg(get_field(s, r1));
5771 }
5772 #define SPEC_in1_f1 0
5773 
5774 static void in1_x1(DisasContext *s, DisasOps *o)
5775 {
5776     o->in1_128 = load_freg_128(get_field(s, r1));
5777 }
5778 #define SPEC_in1_x1 SPEC_r1_f128
5779 
5780 /* Load the high double word of an extended (128-bit) format FP number */
5781 static void in1_x2h(DisasContext *s, DisasOps *o)
5782 {
5783     o->in1 = load_freg(get_field(s, r2));
5784 }
5785 #define SPEC_in1_x2h SPEC_r2_f128
5786 
5787 static void in1_f3(DisasContext *s, DisasOps *o)
5788 {
5789     o->in1 = load_freg(get_field(s, r3));
5790 }
5791 #define SPEC_in1_f3 0
5792 
5793 static void in1_la1(DisasContext *s, DisasOps *o)
5794 {
5795     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5796 }
5797 #define SPEC_in1_la1 0
5798 
5799 static void in1_la2(DisasContext *s, DisasOps *o)
5800 {
5801     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5802     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5803 }
5804 #define SPEC_in1_la2 0
5805 
5806 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5807 {
5808     in1_la1(s, o);
5809     o->in1 = tcg_temp_new_i64();
5810     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5811 }
5812 #define SPEC_in1_m1_8u 0
5813 
5814 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5815 {
5816     in1_la1(s, o);
5817     o->in1 = tcg_temp_new_i64();
5818     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5819 }
5820 #define SPEC_in1_m1_16s 0
5821 
5822 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5823 {
5824     in1_la1(s, o);
5825     o->in1 = tcg_temp_new_i64();
5826     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5827 }
5828 #define SPEC_in1_m1_16u 0
5829 
5830 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5831 {
5832     in1_la1(s, o);
5833     o->in1 = tcg_temp_new_i64();
5834     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5835 }
5836 #define SPEC_in1_m1_32s 0
5837 
5838 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5839 {
5840     in1_la1(s, o);
5841     o->in1 = tcg_temp_new_i64();
5842     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5843 }
5844 #define SPEC_in1_m1_32u 0
5845 
5846 static void in1_m1_64(DisasContext *s, DisasOps *o)
5847 {
5848     in1_la1(s, o);
5849     o->in1 = tcg_temp_new_i64();
5850     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5851 }
5852 #define SPEC_in1_m1_64 0
5853 
5854 /* ====================================================================== */
5855 /* The "INput 2" generators.  These load the second operand to an insn.  */
5856 
5857 static void in2_r1_o(DisasContext *s, DisasOps *o)
5858 {
5859     o->in2 = regs[get_field(s, r1)];
5860     o->g_in2 = true;
5861 }
5862 #define SPEC_in2_r1_o 0
5863 
5864 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5865 {
5866     o->in2 = tcg_temp_new_i64();
5867     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5868 }
5869 #define SPEC_in2_r1_16u 0
5870 
5871 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5872 {
5873     o->in2 = tcg_temp_new_i64();
5874     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5875 }
5876 #define SPEC_in2_r1_32u 0
5877 
5878 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5879 {
5880     int r1 = get_field(s, r1);
5881     o->in2 = tcg_temp_new_i64();
5882     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5883 }
5884 #define SPEC_in2_r1_D32 SPEC_r1_even
5885 
5886 static void in2_r2(DisasContext *s, DisasOps *o)
5887 {
5888     o->in2 = load_reg(get_field(s, r2));
5889 }
5890 #define SPEC_in2_r2 0
5891 
5892 static void in2_r2_o(DisasContext *s, DisasOps *o)
5893 {
5894     o->in2 = regs[get_field(s, r2)];
5895     o->g_in2 = true;
5896 }
5897 #define SPEC_in2_r2_o 0
5898 
5899 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5900 {
5901     int r2 = get_field(s, r2);
5902     if (r2 != 0) {
5903         o->in2 = load_reg(r2);
5904     }
5905 }
5906 #define SPEC_in2_r2_nz 0
5907 
5908 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5909 {
5910     o->in2 = tcg_temp_new_i64();
5911     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5912 }
5913 #define SPEC_in2_r2_8s 0
5914 
5915 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5916 {
5917     o->in2 = tcg_temp_new_i64();
5918     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5919 }
5920 #define SPEC_in2_r2_8u 0
5921 
5922 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5923 {
5924     o->in2 = tcg_temp_new_i64();
5925     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5926 }
5927 #define SPEC_in2_r2_16s 0
5928 
5929 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5930 {
5931     o->in2 = tcg_temp_new_i64();
5932     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5933 }
5934 #define SPEC_in2_r2_16u 0
5935 
5936 static void in2_r3(DisasContext *s, DisasOps *o)
5937 {
5938     o->in2 = load_reg(get_field(s, r3));
5939 }
5940 #define SPEC_in2_r3 0
5941 
5942 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5943 {
5944     int r3 = get_field(s, r3);
5945     o->in2_128 = tcg_temp_new_i128();
5946     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5947 }
5948 #define SPEC_in2_r3_D64 SPEC_r3_even
5949 
5950 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5951 {
5952     o->in2 = tcg_temp_new_i64();
5953     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5954 }
5955 #define SPEC_in2_r3_sr32 0
5956 
5957 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5958 {
5959     o->in2 = tcg_temp_new_i64();
5960     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5961 }
5962 #define SPEC_in2_r3_32u 0
5963 
5964 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5965 {
5966     o->in2 = tcg_temp_new_i64();
5967     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5968 }
5969 #define SPEC_in2_r2_32s 0
5970 
5971 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5972 {
5973     o->in2 = tcg_temp_new_i64();
5974     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5975 }
5976 #define SPEC_in2_r2_32u 0
5977 
5978 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5979 {
5980     o->in2 = tcg_temp_new_i64();
5981     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5982 }
5983 #define SPEC_in2_r2_sr32 0
5984 
5985 static void in2_e2(DisasContext *s, DisasOps *o)
5986 {
5987     o->in2 = load_freg32_i64(get_field(s, r2));
5988 }
5989 #define SPEC_in2_e2 0
5990 
5991 static void in2_f2(DisasContext *s, DisasOps *o)
5992 {
5993     o->in2 = load_freg(get_field(s, r2));
5994 }
5995 #define SPEC_in2_f2 0
5996 
5997 static void in2_x2(DisasContext *s, DisasOps *o)
5998 {
5999     o->in2_128 = load_freg_128(get_field(s, r2));
6000 }
6001 #define SPEC_in2_x2 SPEC_r2_f128
6002 
6003 /* Load the low double word of an extended (128-bit) format FP number */
6004 static void in2_x2l(DisasContext *s, DisasOps *o)
6005 {
6006     o->in2 = load_freg(get_field(s, r2) + 2);
6007 }
6008 #define SPEC_in2_x2l SPEC_r2_f128
6009 
6010 static void in2_ra2(DisasContext *s, DisasOps *o)
6011 {
6012     int r2 = get_field(s, r2);
6013 
6014     /* Note: *don't* treat r2 == 0 as address 0; use the register's value. */
6015     o->in2 = tcg_temp_new_i64();
6016     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
6017 }
6018 #define SPEC_in2_ra2 0
6019 
6020 static void in2_a2(DisasContext *s, DisasOps *o)
6021 {
6022     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
6023     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
6024 }
6025 #define SPEC_in2_a2 0
6026 
6027 static void in2_ri2(DisasContext *s, DisasOps *o)
6028 {
6029     o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
6030 }
6031 #define SPEC_in2_ri2 0
6032 
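/*
 * Editor's note, a worked example: relative-immediate operands are
 * halfword-scaled and sign-extended.  With pc_next = 0x10000 and i2 = -4,
 * the operand above is 0x10000 + (-4 * 2) = 0xfff8, while i2 = 0x100
 * would yield 0x10200.
 */
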
6033 static void in2_sh(DisasContext *s, DisasOps *o)
6034 {
6035     int b2 = get_field(s, b2);
6036     int d2 = get_field(s, d2);
6037 
6038     if (b2 == 0) {
6039         o->in2 = tcg_const_i64(d2 & 0x3f);
6040     } else {
6041         o->in2 = get_address(s, 0, b2, d2);
6042         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
6043     }
6044 }
6045 #define SPEC_in2_sh 0
6046 
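/*
 * Editor's note, a worked example: only the low 6 bits of the computed
 * address form the shift count.  With b2 = 0 and d2 = 0x45 the count is
 * 0x45 & 0x3f = 5; with b2 != 0, the base register's value is added to
 * d2 before masking.
 */
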
6047 static void in2_m2_8u(DisasContext *s, DisasOps *o)
6048 {
6049     in2_a2(s, o);
6050     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
6051 }
6052 #define SPEC_in2_m2_8u 0
6053 
6054 static void in2_m2_16s(DisasContext *s, DisasOps *o)
6055 {
6056     in2_a2(s, o);
6057     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
6058 }
6059 #define SPEC_in2_m2_16s 0
6060 
6061 static void in2_m2_16u(DisasContext *s, DisasOps *o)
6062 {
6063     in2_a2(s, o);
6064     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6065 }
6066 #define SPEC_in2_m2_16u 0
6067 
6068 static void in2_m2_32s(DisasContext *s, DisasOps *o)
6069 {
6070     in2_a2(s, o);
6071     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6072 }
6073 #define SPEC_in2_m2_32s 0
6074 
6075 static void in2_m2_32u(DisasContext *s, DisasOps *o)
6076 {
6077     in2_a2(s, o);
6078     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6079 }
6080 #define SPEC_in2_m2_32u 0
6081 
6082 #ifndef CONFIG_USER_ONLY
6083 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
6084 {
6085     in2_a2(s, o);
6086     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
6087 }
6088 #define SPEC_in2_m2_32ua 0
6089 #endif
6090 
6091 static void in2_m2_64(DisasContext *s, DisasOps *o)
6092 {
6093     in2_a2(s, o);
6094     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6095 }
6096 #define SPEC_in2_m2_64 0
6097 
6098 static void in2_m2_64w(DisasContext *s, DisasOps *o)
6099 {
6100     in2_a2(s, o);
6101     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6102     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
6103 }
6104 #define SPEC_in2_m2_64w 0
6105 
6106 #ifndef CONFIG_USER_ONLY
6107 static void in2_m2_64a(DisasContext *s, DisasOps *o)
6108 {
6109     in2_a2(s, o);
6110     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
6111 }
6112 #define SPEC_in2_m2_64a 0
6113 #endif
6114 
6115 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6116 {
6117     in2_ri2(s, o);
6118     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6119 }
6120 #define SPEC_in2_mri2_16u 0
6121 
6122 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6123 {
6124     in2_ri2(s, o);
6125     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6126 }
6127 #define SPEC_in2_mri2_32s 0
6128 
6129 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6130 {
6131     in2_ri2(s, o);
6132     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6133 }
6134 #define SPEC_in2_mri2_32u 0
6135 
6136 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6137 {
6138     in2_ri2(s, o);
6139     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6140 }
6141 #define SPEC_in2_mri2_64 0
6142 
6143 static void in2_i2(DisasContext *s, DisasOps *o)
6144 {
6145     o->in2 = tcg_const_i64(get_field(s, i2));
6146 }
6147 #define SPEC_in2_i2 0
6148 
6149 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6150 {
6151     o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6152 }
6153 #define SPEC_in2_i2_8u 0
6154 
6155 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6156 {
6157     o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6158 }
6159 #define SPEC_in2_i2_16u 0
6160 
6161 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6162 {
6163     o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6164 }
6165 #define SPEC_in2_i2_32u 0
6166 
6167 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6168 {
6169     uint64_t i2 = (uint16_t)get_field(s, i2);
6170     o->in2 = tcg_const_i64(i2 << s->insn->data);
6171 }
6172 #define SPEC_in2_i2_16u_shl 0
6173 
6174 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6175 {
6176     uint64_t i2 = (uint32_t)get_field(s, i2);
6177     o->in2 = tcg_const_i64(i2 << s->insn->data);
6178 }
6179 #define SPEC_in2_i2_32u_shl 0
6180 
6181 #ifndef CONFIG_USER_ONLY
6182 static void in2_insn(DisasContext *s, DisasOps *o)
6183 {
6184     o->in2 = tcg_const_i64(s->fields.raw_insn);
6185 }
6186 #define SPEC_in2_insn 0
6187 #endif
6188 
6189 /* ====================================================================== */
6190 
6191 /* Find opc within the table of insns.  This is formulated as a switch
6192    statement so that (1) we get compile-time notice of cut-and-paste errors
6193    for duplicated opcodes, and (2) the compiler generates the binary
6194    search tree, rather than us having to post-process the table.  */
6195 
6196 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6197     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6198 
6199 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6200     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6201 
6202 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6203     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6204 
6205 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6206 
6207 enum DisasInsnEnum {
6208 #include "insn-data.h.inc"
6209 };
6210 
6211 #undef E
6212 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6213     .opc = OPC,                                                             \
6214     .flags = FL,                                                            \
6215     .fmt = FMT_##FT,                                                        \
6216     .fac = FAC_##FC,                                                        \
6217     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6218     .name = #NM,                                                            \
6219     .help_in1 = in1_##I1,                                                   \
6220     .help_in2 = in2_##I2,                                                   \
6221     .help_prep = prep_##P,                                                  \
6222     .help_wout = wout_##W,                                                  \
6223     .help_cout = cout_##CC,                                                 \
6224     .help_op = op_##OP,                                                     \
6225     .data = D                                                               \
6226  },
6227 
6228 /* Allow 0 to be used for NULL in the table below.  */
6229 #define in1_0  NULL
6230 #define in2_0  NULL
6231 #define prep_0  NULL
6232 #define wout_0  NULL
6233 #define cout_0  NULL
6234 #define op_0  NULL
6235 
6236 #define SPEC_in1_0 0
6237 #define SPEC_in2_0 0
6238 #define SPEC_prep_0 0
6239 #define SPEC_wout_0 0
6240 
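/*
 * Editor's note: to illustrate the expansion above, a hypothetical table
 * row such as
 *
 *     C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * (see insn-data.h.inc for the actual rows) first contributes an
 * "insn_AR" enumerator, and then, once E is redefined, a DisasInsn
 * initializer roughly like:
 *
 *     { .opc = 0x1a00, .flags = 0, .fmt = FMT_RR_a, .fac = FAC_Z,
 *       .spec = SPEC_in1_r1 | SPEC_in2_r2
 *             | SPEC_prep_new | SPEC_wout_r1_32,
 *       .name = "AR",
 *       .help_in1 = in1_r1, .help_in2 = in2_r2, .help_prep = prep_new,
 *       .help_wout = wout_r1_32, .help_cout = cout_adds32,
 *       .help_op = op_add, .data = 0 },
 */
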
6241 /* Give smaller names to the various facilities.  */
6242 #define FAC_Z           S390_FEAT_ZARCH
6243 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6244 #define FAC_DFP         S390_FEAT_DFP
6245 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6246 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6247 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6248 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6249 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6250 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6251 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6252 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6253 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6254 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6255 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6256 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6257 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6258 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6259 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6260 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6261 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6262 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6263 #define FAC_SFLE        S390_FEAT_STFLE
6264 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6265 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6266 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6267 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6268 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6269 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6270 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6271 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6272 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6273 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6274 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6275 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6276 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6277 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6278 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6279 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6280 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6281 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6282 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6283 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6284 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6285 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6286 
6287 static const DisasInsn insn_info[] = {
6288 #include "insn-data.h.inc"
6289 };
6290 
6291 #undef E
6292 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6293     case OPC: return &insn_info[insn_ ## NM];
6294 
6295 static const DisasInsn *lookup_opc(uint16_t opc)
6296 {
6297     switch (opc) {
6298 #include "insn-data.h.inc"
6299     default:
6300         return NULL;
6301     }
6302 }
6303 
6304 #undef F
6305 #undef E
6306 #undef D
6307 #undef C
6308 
6309 /* Extract a field from the insn.  The INSN should be left-aligned in
6310    the uint64_t so that we can more easily utilize the big-bit-endian
6311    definitions we extract from the Principles of Operation.  */
6312 
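/*
 * Editor's note, a worked example (the field positions are assumed here;
 * the real DisasField values come from format_info): take the RR insn
 * 0x1a12, left-aligned as insn = 0x1a12000000000000.  An unsigned 4-bit
 * register field with beg = 8 extracts as
 *
 *     r = (insn << 8) >> (64 - 4) = 0x1
 *
 * and its partner at beg = 12 as (insn << 12) >> 60 = 0x2.  For the
 * type-2 split 20-bit displacement, the raw extract holds dl in bits
 * 8..19 and dh in bits 0..7; with dh = 0xff and dl = 0x001, the
 * reassembly ((int8_t)r << 12) | (r >> 8) yields 0xfffff001 = -4095.
 */
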
6313 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6314 {
6315     uint32_t r, m;
6316 
6317     if (f->size == 0) {
6318         return;
6319     }
6320 
6321     /* Zero extract the field from the insn.  */
6322     r = (insn << f->beg) >> (64 - f->size);
6323 
6324     /* Sign-extend, or un-swap the field as necessary.  */
6325     switch (f->type) {
6326     case 0: /* unsigned */
6327         break;
6328     case 1: /* signed */
6329         assert(f->size <= 32);
6330         m = 1u << (f->size - 1);
6331         r = (r ^ m) - m;
6332         break;
6333     case 2: /* dl+dh split, signed 20 bit. */
6334         r = ((int8_t)r << 12) | (r >> 8);
6335         break;
6336     case 3: /* MSB stored in RXB */
6337         g_assert(f->size == 4);
6338         switch (f->beg) {
6339         case 8:
6340             r |= extract64(insn, 63 - 36, 1) << 4;
6341             break;
6342         case 12:
6343             r |= extract64(insn, 63 - 37, 1) << 4;
6344             break;
6345         case 16:
6346             r |= extract64(insn, 63 - 38, 1) << 4;
6347             break;
6348         case 32:
6349             r |= extract64(insn, 63 - 39, 1) << 4;
6350             break;
6351         default:
6352             g_assert_not_reached();
6353         }
6354         break;
6355     default:
6356         abort();
6357     }
6358 
6359     /*
6360      * Validate the "compressed" encoding we selected above, i.e. check
6361      * that we haven't made two different original fields overlap.
6362      */
6363     assert(((o->presentC >> f->indexC) & 1) == 0);
6364     o->presentC |= 1 << f->indexC;
6365     o->presentO |= 1 << f->indexO;
6366 
6367     o->c[f->indexC] = r;
6368 }
6369 
6370 /* Look up the insn at the current PC, extracting the operands into O and
6371    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6372 
6373 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6374 {
6375     uint64_t insn, pc = s->base.pc_next;
6376     int op, op2, ilen;
6377     const DisasInsn *info;
6378 
6379     if (unlikely(s->ex_value)) {
6380         /* Drop the EX data now, so that it's clear on exception paths.  */
6381         TCGv_i64 zero = tcg_const_i64(0);
6382         int i;
6383         tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6384         tcg_temp_free_i64(zero);
6385 
6386         /* Extract the values saved by EXECUTE.  */
6387         insn = s->ex_value & 0xffffffffffff0000ull;
6388         ilen = s->ex_value & 0xf;
6389         /* register insn bytes with translator so plugins work */
6390         for (i = 0; i < ilen; i++) {
6391             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6392             translator_fake_ldb(byte, pc + i);
6393         }
6394         op = insn >> 56;
6395     } else {
6396         insn = ld_code2(env, s, pc);
6397         op = (insn >> 8) & 0xff;
6398         ilen = get_ilen(op);
6399         switch (ilen) {
6400         case 2:
6401             insn = insn << 48;
6402             break;
6403         case 4:
6404             insn = ld_code4(env, s, pc) << 32;
6405             break;
6406         case 6:
6407             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6408             break;
6409         default:
6410             g_assert_not_reached();
6411         }
6412     }
6413     s->pc_tmp = s->base.pc_next + ilen;
6414     s->ilen = ilen;
6415 
6416     /* We can't actually determine the insn format until we've looked up
6417        the full insn opcode, which in turn requires locating the
6418        secondary opcode.  Assume by default that OP2 is at bit 40; for
6419        those smaller insns that don't actually have a secondary opcode
6420        this will correctly result in OP2 = 0. */
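    /*
     * Editor's note, a worked example: STORE CLOCK is opcode 0xb205.
     * Left-aligned, insn = 0xb205dddd00000000, op = 0xb2, and the case
     * below computes op2 = (insn << 8) >> 56 = 0x05, so lookup_opc()
     * is keyed with 0xb205.
     */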
6421     switch (op) {
6422     case 0x01: /* E */
6423     case 0x80: /* S */
6424     case 0x82: /* S */
6425     case 0x93: /* S */
6426     case 0xb2: /* S, RRF, RRE, IE */
6427     case 0xb3: /* RRE, RRD, RRF */
6428     case 0xb9: /* RRE, RRF */
6429     case 0xe5: /* SSE, SIL */
6430         op2 = (insn << 8) >> 56;
6431         break;
6432     case 0xa5: /* RI */
6433     case 0xa7: /* RI */
6434     case 0xc0: /* RIL */
6435     case 0xc2: /* RIL */
6436     case 0xc4: /* RIL */
6437     case 0xc6: /* RIL */
6438     case 0xc8: /* SSF */
6439     case 0xcc: /* RIL */
6440         op2 = (insn << 12) >> 60;
6441         break;
6442     case 0xc5: /* MII */
6443     case 0xc7: /* SMI */
6444     case 0xd0 ... 0xdf: /* SS */
6445     case 0xe1: /* SS */
6446     case 0xe2: /* SS */
6447     case 0xe8: /* SS */
6448     case 0xe9: /* SS */
6449     case 0xea: /* SS */
6450     case 0xee ... 0xf3: /* SS */
6451     case 0xf8 ... 0xfd: /* SS */
6452         op2 = 0;
6453         break;
6454     default:
6455         op2 = (insn << 40) >> 56;
6456         break;
6457     }
6458 
6459     memset(&s->fields, 0, sizeof(s->fields));
6460     s->fields.raw_insn = insn;
6461     s->fields.op = op;
6462     s->fields.op2 = op2;
6463 
6464     /* Look up the instruction.  */
6465     info = lookup_opc(op << 8 | op2);
6466     s->insn = info;
6467 
6468     /* If we found it, extract the operands.  */
6469     if (info != NULL) {
6470         DisasFormat fmt = info->fmt;
6471         int i;
6472 
6473         for (i = 0; i < NUM_C_FIELD; ++i) {
6474             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6475         }
6476     }
6477     return info;
6478 }
6479 
6480 static bool is_afp_reg(int reg)
6481 {
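    /* Registers f0, f2, f4 and f6 are the original four; the rest are AFP. */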
6482     return reg % 2 || reg > 6;
6483 }
6484 
6485 static bool is_fp_pair(int reg)
6486 {
6487     /* Valid pairs are 0,1,4,5,8,9,12,13, i.e. exactly those with bit 1 clear. */
6488     return !(reg & 0x2);
6489 }
6490 
6491 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6492 {
6493     const DisasInsn *insn;
6494     DisasJumpType ret = DISAS_NEXT;
6495     DisasOps o = {};
6496     bool icount = false;
6497 
6498     /* Search for the insn in the table.  */
6499     insn = extract_insn(env, s);
6500 
6501     /* Update insn_start now that we know the ILEN.  */
6502     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6503 
6504     /* Not found means unimplemented/illegal opcode.  */
6505     if (insn == NULL) {
6506         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6507                       s->fields.op, s->fields.op2);
6508         gen_illegal_opcode(s);
6509         ret = DISAS_NORETURN;
6510         goto out;
6511     }
6512 
6513 #ifndef CONFIG_USER_ONLY
6514     if (s->base.tb->flags & FLAG_MASK_PER) {
6515         TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6516         gen_helper_per_ifetch(cpu_env, addr);
6517         tcg_temp_free_i64(addr);
6518     }
6519 #endif
6520 
6521     /* process flags */
6522     if (insn->flags) {
6523         /* privileged instruction */
6524         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6525             gen_program_exception(s, PGM_PRIVILEGED);
6526             ret = DISAS_NORETURN;
6527             goto out;
6528         }
6529 
6530         /* If AFP is not enabled, FP/vector instructions and AFP registers are forbidden. */
6531         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6532             uint8_t dxc = 0;
6533 
6534             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6535                 dxc = 1;
6536             }
6537             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6538                 dxc = 1;
6539             }
6540             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6541                 dxc = 1;
6542             }
6543             if (insn->flags & IF_BFP) {
6544                 dxc = 2;
6545             }
6546             if (insn->flags & IF_DFP) {
6547                 dxc = 3;
6548             }
6549             if (insn->flags & IF_VEC) {
6550                 dxc = 0xfe;
6551             }
6552             if (dxc) {
6553                 gen_data_exception(dxc);
6554                 ret = DISAS_NORETURN;
6555                 goto out;
6556             }
6557         }
6558 
6559         /* If vector instructions are not enabled, executing them is forbidden. */
6560         if (insn->flags & IF_VEC) {
6561             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6562                 gen_data_exception(0xfe);
6563                 ret = DISAS_NORETURN;
6564                 goto out;
6565             }
6566         }
6567 
6568         /* input/output is the special case for icount mode */
6569         if (unlikely(insn->flags & IF_IO)) {
6570             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6571             if (icount) {
6572                 gen_io_start();
6573             }
6574         }
6575     }
6576 
6577     /* Check for insn specification exceptions.  */
6578     if (insn->spec) {
6579         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6580             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6581             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6582             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6583             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6584             gen_program_exception(s, PGM_SPECIFICATION);
6585             ret = DISAS_NORETURN;
6586             goto out;
6587         }
6588     }
6589 
6590     /* Implement the instruction.  */
6591     if (insn->help_in1) {
6592         insn->help_in1(s, &o);
6593     }
6594     if (insn->help_in2) {
6595         insn->help_in2(s, &o);
6596     }
6597     if (insn->help_prep) {
6598         insn->help_prep(s, &o);
6599     }
6600     if (insn->help_op) {
6601         ret = insn->help_op(s, &o);
6602     }
6603     if (ret != DISAS_NORETURN) {
6604         if (insn->help_wout) {
6605             insn->help_wout(s, &o);
6606         }
6607         if (insn->help_cout) {
6608             insn->help_cout(s, &o);
6609         }
6610     }
6611 
6612     /* Free any temporaries created by the helpers.  */
6613     if (o.out && !o.g_out) {
6614         tcg_temp_free_i64(o.out);
6615     }
6616     if (o.out2 && !o.g_out2) {
6617         tcg_temp_free_i64(o.out2);
6618     }
6619     if (o.in1 && !o.g_in1) {
6620         tcg_temp_free_i64(o.in1);
6621     }
6622     if (o.in2 && !o.g_in2) {
6623         tcg_temp_free_i64(o.in2);
6624     }
6625     if (o.addr1) {
6626         tcg_temp_free_i64(o.addr1);
6627     }
6628     if (o.out_128) {
6629         tcg_temp_free_i128(o.out_128);
6630     }
6631     if (o.in1_128) {
6632         tcg_temp_free_i128(o.in1_128);
6633     }
6634     if (o.in2_128) {
6635         tcg_temp_free_i128(o.in2_128);
6636     }
6637     /* I/O should be the last instruction in a TB when icount is enabled. */
6638     if (unlikely(icount && ret == DISAS_NEXT)) {
6639         ret = DISAS_TOO_MANY;
6640     }
6641 
6642 #ifndef CONFIG_USER_ONLY
6643     if (s->base.tb->flags & FLAG_MASK_PER) {
6644         /* An exception might be triggered; save the PSW if not already done.  */
6645         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6646             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6647         }
6648 
6649         /* Call the helper to check for a possible PER exception.  */
6650         gen_helper_per_check_exception(cpu_env);
6651     }
6652 #endif
6653 
6654 out:
6655     /* Advance to the next instruction.  */
6656     s->base.pc_next = s->pc_tmp;
6657     return ret;
6658 }
6659 
6660 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6661 {
6662     DisasContext *dc = container_of(dcbase, DisasContext, base);
6663 
6664     /* 31-bit mode */
6665     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6666         dc->base.pc_first &= 0x7fffffff;
6667         dc->base.pc_next = dc->base.pc_first;
6668     }
6669 
6670     dc->cc_op = CC_OP_DYNAMIC;
6671     dc->ex_value = dc->base.tb->cs_base;
6672     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6673 }
6674 
6675 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6676 {
6677 }
6678 
6679 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6680 {
6681     DisasContext *dc = container_of(dcbase, DisasContext, base);
6682 
6683     /* Delay setting ilen until we've read the insn. */
6684     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6685     dc->insn_start = tcg_last_op();
6686 }
6687 
6688 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6689                                 uint64_t pc)
6690 {
6691     uint64_t insn = cpu_lduw_code(env, pc);
6692 
6693     return pc + get_ilen((insn >> 8) & 0xff);
6694 }
6695 
6696 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6697 {
6698     CPUS390XState *env = cs->env_ptr;
6699     DisasContext *dc = container_of(dcbase, DisasContext, base);
6700 
6701     dc->base.is_jmp = translate_one(env, dc);
6702     if (dc->base.is_jmp == DISAS_NEXT) {
6703         if (dc->ex_value ||
6704             !is_same_page(dcbase, dc->base.pc_next) ||
6705             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6706             dc->base.is_jmp = DISAS_TOO_MANY;
6707         }
6708     }
6709 }
6710 
6711 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6712 {
6713     DisasContext *dc = container_of(dcbase, DisasContext, base);
6714 
6715     switch (dc->base.is_jmp) {
6716     case DISAS_NORETURN:
6717         break;
6718     case DISAS_TOO_MANY:
6719         update_psw_addr(dc);
6720         /* FALLTHRU */
6721     case DISAS_PC_UPDATED:
6722         /* The next TB starts off with CC_OP_DYNAMIC, so make sure the
6723            cc op type is stored in env.  */
6724         update_cc_op(dc);
6725         /* FALLTHRU */
6726     case DISAS_PC_CC_UPDATED:
6727         /* Exit the TB, either by raising a debug exception or by return.  */
6728         if (dc->exit_to_mainloop) {
6729             tcg_gen_exit_tb(NULL, 0);
6730         } else {
6731             tcg_gen_lookup_and_goto_ptr();
6732         }
6733         break;
6734     default:
6735         g_assert_not_reached();
6736     }
6737 }
6738 
6739 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6740                                CPUState *cs, FILE *logfile)
6741 {
6742     DisasContext *dc = container_of(dcbase, DisasContext, base);
6743 
6744     if (unlikely(dc->ex_value)) {
6745         /* ??? Unfortunately target_disas can't use host memory.  */
6746         fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6747     } else {
6748         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6749         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6750     }
6751 }
6752 
6753 static const TranslatorOps s390x_tr_ops = {
6754     .init_disas_context = s390x_tr_init_disas_context,
6755     .tb_start           = s390x_tr_tb_start,
6756     .insn_start         = s390x_tr_insn_start,
6757     .translate_insn     = s390x_tr_translate_insn,
6758     .tb_stop            = s390x_tr_tb_stop,
6759     .disas_log          = s390x_tr_disas_log,
6760 };
6761 
6762 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
6763                            target_ulong pc, void *host_pc)
6764 {
6765     DisasContext dc;
6766 
6767     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6768 }
6769 
6770 void s390x_restore_state_to_opc(CPUState *cs,
6771                                 const TranslationBlock *tb,
6772                                 const uint64_t *data)
6773 {
6774     S390CPU *cpu = S390_CPU(cs);
6775     CPUS390XState *env = &cpu->env;
6776     int cc_op = data[1];
6777 
6778     env->psw.addr = data[0];
6779 
6780     /* Update the CC opcode if it is not already up-to-date.  */
6781     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6782         env->cc_op = cc_op;
6783     }
6784 
6785     /* Record ILEN.  */
6786     env->int_pgm_ilen = data[2];
6787 }
6788