/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

/* Information that (almost) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};
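
/*
 * For example, r1 and m1 can share compact slot 0 (FLD_C_r1 ==
 * FLD_C_m1 == 0) because no format carries both fields.  have_field()
 * tests the FLD_O_* bit in presentO, while get_field() reads
 * c[FLD_C_*]; the assert in get_field1() below catches any access to
 * a field the current format does not actually provide.
 */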

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    TCGv_i64 tmp;

    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tmp = tcg_const_i64(pc);
    tcg_gen_deposit_i64(out, out, tmp, 0, 32);
    tcg_temp_free_i64(tmp);
}
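
/*
 * For illustration: with pc == 0x2000, a 64-bit PSW yields the full
 * link value 0x2000; a 31-bit PSW deposits 0x80002000 (addressing
 * mode bit set) into the low 32 bits of OUT; a 24-bit PSW deposits
 * 0x2000 with the high-order bit clear.  The high half of OUT is
 * preserved in both non-64-bit cases.
 */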

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}
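
/*
 * With these globals in place, e.g. tcg_gen_addi_i64(regs[1], regs[1], 1)
 * operates directly on CPUS390XState.regs[1]; TCG tracks the globals
 * across the translation block, so no explicit per-access load/store
 * of the register file is required.
 */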

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds bytes 0-7 and vregs[n][1] bytes 8-15 of the
     * 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care.  For operations like addition,
     * the two 8-byte elements have to be loaded separately.  Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
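
/*
 * Worked example: on a little-endian host, byte element 0 (es == MO_8,
 * enr == 0) yields offs = 0 ^ (8 - 1) = 7, i.e. the most significant
 * byte of the host's first uint64_t, matching the architecture's
 * big-endian element numbering.  On a big-endian host the XOR is
 * skipped and offs stays 0.
 */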

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
        if (to_next) {
            tcg_temp_free_i64(next_pc);
        }
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_const_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
        tcg_temp_free_i64(pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        tcg_abort();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_program_exception(DisasContext *s, int code)
{
    TCGv_i32 tmp;

    /* Remember what pgm exception this was.  */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);

    tmp = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);

    /* Update the psw.  */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
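
/*
 * Worked example: for d2 = -8, b2 = 15, x2 = 0 in 31-bit mode this
 * computes (regs[15] - 8) & 0x7fffffff, i.e. the wrap-around within
 * the current addressing mode.  Applying the mask only after the
 * addition keeps the immediate addend small, per the note above.
 */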

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
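
/*
 * Example of the lazy scheme: an ADD leaves s->cc_op == CC_OP_ADD_64
 * with the operands in cc_src/cc_dst and the result in cc_vr; only if
 * a later instruction actually consumes the CC does gen_op_calc_cc()
 * emit the call to the calc_cc helper with those three arguments.
 */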

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For these, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
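
/*
 * Example: a branch mask of 12 (CC 0 or 1, i.e. equal or low) after a
 * signed comparison indexes entry 12 above and yields TCG_COND_LE, so
 * the branch is emitted as a single inline brcond instead of a helper
 * call.  The entries come in pairs because bit 0 of the mask selects
 * CC 3, which a comparison never produces.
 */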

/* Table of mask values to comparison codes, given a logic op as input.
   For these, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
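
/*
 * For instance, with s->cc_op == CC_OP_LTGT_32 and mask == 8 the
 * result is c->cond == TCG_COND_EQ on the 32-bit truncations of
 * cc_src and cc_dst; for a mask no table handles, the code falls back
 * to gen_op_calc_cc() and compares the materialized CC value via the
 * CC_OP_STATIC cases above.
 */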

static void free_compare(DisasCompare *c)
{
    if (!c->g1) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.a);
        } else {
            tcg_temp_free_i32(c->u.s32.a);
        }
    }
    if (!c->g2) {
        if (c->is_64) {
            tcg_temp_free_i64(c->u.s64.b);
        } else {
            tcg_temp_free_i32(c->u.s32.b);
        }
    }
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
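
/*
 * A sketch of the expansion, assuming the usual insn-format.h.inc
 * entry F2(RX_a, R(1, 8), BXD(2)): the first set of definitions adds
 * FMT_RX_a to DisasFormat, and the second produces a format_info[]
 * entry describing r1 at bit 8 (4 bits) plus the b2/x2/d2 triple at
 * bits 16/12/20.
 */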

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};
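
/*
 * A typical table entry (a sketch, following the insn-data.h.inc
 * conventions): AR wires opcode 0x1a00 to format RR_a with
 * help_in1 = in1_r1, help_in2 = in2_r2, help_op = op_add and a cout
 * hook that sets CC_OP_ADD_32, so translate_one() only has to run the
 * hooks in sequence.
 */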

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
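
/*
 * Recall the CC layout for logical add: 0 = zero, no carry; 1 = not
 * zero, no carry; 2 = zero, carry; 3 = not zero, carry.  Hence
 * cc >> 1 is exactly the carry bit extracted in the CC_OP_STATIC
 * case; for CC_OP_SUBU the stored borrow indication (0 or -1) is
 * turned into a carry (1 or 0) by adding 1.
 */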

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_const_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
    tcg_temp_free_i64(zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
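
/*
 * Sketch of the two paths above: without the facility, the add is a
 * plain load / add / store and is not atomic against other CPUs; with
 * it, tcg_gen_atomic_fetch_add_i64() performs the update in memory
 * and returns the old value, which is still needed to recompute the
 * sum (and thus the CC) in registers.
 */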
1449 
1450 static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
1451 {
1452     gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1453     return DISAS_NEXT;
1454 }
1455 
1456 static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
1457 {
1458     gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1459     return DISAS_NEXT;
1460 }
1461 
1462 static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
1463 {
1464     gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1465     return_low128(o->out2);
1466     return DISAS_NEXT;
1467 }
1468 
1469 static DisasJumpType op_and(DisasContext *s, DisasOps *o)
1470 {
1471     tcg_gen_and_i64(o->out, o->in1, o->in2);
1472     return DISAS_NEXT;
1473 }
1474 
1475 static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
1476 {
1477     int shift = s->insn->data & 0xff;
1478     int size = s->insn->data >> 8;
1479     uint64_t mask = ((1ull << size) - 1) << shift;
1480 
1481     assert(!o->g_in2);
1482     tcg_gen_shli_i64(o->in2, o->in2, shift);
1483     tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1484     tcg_gen_and_i64(o->out, o->in1, o->in2);
1485 
1486     /* Produce the CC from only the bits manipulated.  */
1487     tcg_gen_andi_i64(cc_dst, o->out, mask);
1488     set_cc_nz_u64(s, cc_dst);
1489     return DISAS_NEXT;
1490 }
1491 
1492 static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
1493 {
1494     tcg_gen_andc_i64(o->out, o->in1, o->in2);
1495     return DISAS_NEXT;
1496 }
1497 
1498 static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
1499 {
1500     tcg_gen_orc_i64(o->out, o->in1, o->in2);
1501     return DISAS_NEXT;
1502 }
1503 
1504 static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
1505 {
1506     tcg_gen_nand_i64(o->out, o->in1, o->in2);
1507     return DISAS_NEXT;
1508 }
1509 
1510 static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
1511 {
1512     tcg_gen_nor_i64(o->out, o->in1, o->in2);
1513     return DISAS_NEXT;
1514 }
1515 
1516 static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
1517 {
1518     tcg_gen_eqv_i64(o->out, o->in1, o->in2);
1519     return DISAS_NEXT;
1520 }
1521 
1522 static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
1523 {
1524     o->in1 = tcg_temp_new_i64();
1525 
1526     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1527         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
1528     } else {
1529         /* Perform the atomic operation in memory. */
1530         tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
1531                                      s->insn->data);
1532     }
1533 
1534     /* Recompute also for atomic case: needed for setting CC. */
1535     tcg_gen_and_i64(o->out, o->in1, o->in2);
1536 
1537     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
1538         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
1539     }
1540     return DISAS_NEXT;
1541 }
1542 
1543 static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
1544 {
1545     pc_to_link_info(o->out, s, s->pc_tmp);
1546     if (o->in2) {
1547         tcg_gen_mov_i64(psw_addr, o->in2);
1548         per_branch(s, false);
1549         return DISAS_PC_UPDATED;
1550     } else {
1551         return DISAS_NEXT;
1552     }
1553 }
1554 
1555 static void save_link_info(DisasContext *s, DisasOps *o)
1556 {
1557     TCGv_i64 t;
1558 
1559     if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
1560         pc_to_link_info(o->out, s, s->pc_tmp);
1561         return;
1562     }
1563     gen_op_calc_cc(s);
1564     tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
1565     tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
1566     t = tcg_temp_new_i64();
1567     tcg_gen_shri_i64(t, psw_mask, 16);
1568     tcg_gen_andi_i64(t, t, 0x0f000000);
1569     tcg_gen_or_i64(o->out, o->out, t);
1570     tcg_gen_extu_i32_i64(t, cc_op);
1571     tcg_gen_shli_i64(t, t, 28);
1572     tcg_gen_or_i64(o->out, o->out, t);
1573     tcg_temp_free_i64(t);
1574 }
1575 
1576 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1577 {
1578     save_link_info(s, o);
1579     if (o->in2) {
1580         tcg_gen_mov_i64(psw_addr, o->in2);
1581         per_branch(s, false);
1582         return DISAS_PC_UPDATED;
1583     } else {
1584         return DISAS_NEXT;
1585     }
1586 }
1587 
1588 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1589 {
1590     pc_to_link_info(o->out, s, s->pc_tmp);
1591     return help_goto_direct(s, s->base.pc_next + (int64_t)get_field(s, i2) * 2);
1592 }
1593 
1594 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1595 {
1596     int m1 = get_field(s, m1);
1597     bool is_imm = have_field(s, i2);
1598     int imm = is_imm ? get_field(s, i2) : 0;
1599     DisasCompare c;
1600 
1601     /* BCR with R2 = 0 causes no branching */
1602     if (have_field(s, r2) && get_field(s, r2) == 0) {
1603         if (m1 == 14) {
1604             /* Perform serialization */
1605             /* FIXME: check for fast-BCR-serialization facility */
1606             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1607         }
1608         if (m1 == 15) {
1609             /* Perform serialization */
1610             /* FIXME: perform checkpoint-synchronisation */
1611             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1612         }
1613         return DISAS_NEXT;
1614     }
1615 
1616     disas_jcc(s, &c, m1);
1617     return help_branch(s, &c, is_imm, imm, o->in2);
1618 }
1619 
1620 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1621 {
1622     int r1 = get_field(s, r1);
1623     bool is_imm = have_field(s, i2);
1624     int imm = is_imm ? get_field(s, i2) : 0;
1625     DisasCompare c;
1626     TCGv_i64 t;
1627 
1628     c.cond = TCG_COND_NE;
1629     c.is_64 = false;
1630     c.g1 = false;
1631     c.g2 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_subi_i64(t, regs[r1], 1);
1635     store_reg32_i64(r1, t);
1636     c.u.s32.a = tcg_temp_new_i32();
1637     c.u.s32.b = tcg_const_i32(0);
1638     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1639     tcg_temp_free_i64(t);
1640 
1641     return help_branch(s, &c, is_imm, imm, o->in2);
1642 }
1643 
1644 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1645 {
1646     int r1 = get_field(s, r1);
1647     int imm = get_field(s, i2);
1648     DisasCompare c;
1649     TCGv_i64 t;
1650 
1651     c.cond = TCG_COND_NE;
1652     c.is_64 = false;
1653     c.g1 = false;
1654     c.g2 = false;
1655 
1656     t = tcg_temp_new_i64();
1657     tcg_gen_shri_i64(t, regs[r1], 32);
1658     tcg_gen_subi_i64(t, t, 1);
1659     store_reg32h_i64(r1, t);
1660     c.u.s32.a = tcg_temp_new_i32();
1661     c.u.s32.b = tcg_const_i32(0);
1662     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1663     tcg_temp_free_i64(t);
1664 
1665     return help_branch(s, &c, 1, imm, o->in2);
1666 }
1667 
1668 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1669 {
1670     int r1 = get_field(s, r1);
1671     bool is_imm = have_field(s, i2);
1672     int imm = is_imm ? get_field(s, i2) : 0;
1673     DisasCompare c;
1674 
1675     c.cond = TCG_COND_NE;
1676     c.is_64 = true;
1677     c.g1 = true;
1678     c.g2 = false;
1679 
1680     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1681     c.u.s64.a = regs[r1];
1682     c.u.s64.b = tcg_const_i64(0);
1683 
1684     return help_branch(s, &c, is_imm, imm, o->in2);
1685 }
1686 
1687 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1688 {
1689     int r1 = get_field(s, r1);
1690     int r3 = get_field(s, r3);
1691     bool is_imm = have_field(s, i2);
1692     int imm = is_imm ? get_field(s, i2) : 0;
1693     DisasCompare c;
1694     TCGv_i64 t;
1695 
1696     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1697     c.is_64 = false;
1698     c.g1 = false;
1699     c.g2 = false;
1700 
1701     t = tcg_temp_new_i64();
1702     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1703     c.u.s32.a = tcg_temp_new_i32();
1704     c.u.s32.b = tcg_temp_new_i32();
1705     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1706     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1707     store_reg32_i64(r1, t);
1708     tcg_temp_free_i64(t);
1709 
1710     return help_branch(s, &c, is_imm, imm, o->in2);
1711 }
1712 
1713 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1714 {
1715     int r1 = get_field(s, r1);
1716     int r3 = get_field(s, r3);
1717     bool is_imm = have_field(s, i2);
1718     int imm = is_imm ? get_field(s, i2) : 0;
1719     DisasCompare c;
1720 
1721     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1722     c.is_64 = true;
1723 
1724     if (r1 == (r3 | 1)) {
1725         c.u.s64.b = load_reg(r3 | 1);
1726         c.g2 = false;
1727     } else {
1728         c.u.s64.b = regs[r3 | 1];
1729         c.g2 = true;
1730     }
1731 
1732     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1733     c.u.s64.a = regs[r1];
1734     c.g1 = true;
1735 
1736     return help_branch(s, &c, is_imm, imm, o->in2);
1737 }
1738 
1739 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1740 {
1741     int imm, m3 = get_field(s, m3);
1742     bool is_imm;
1743     DisasCompare c;
1744 
1745     c.cond = ltgt_cond[m3];
1746     if (s->insn->data) {
1747         c.cond = tcg_unsigned_cond(c.cond);
1748     }
1749     c.is_64 = c.g1 = c.g2 = true;
1750     c.u.s64.a = o->in1;
1751     c.u.s64.b = o->in2;
1752 
1753     is_imm = have_field(s, i4);
1754     if (is_imm) {
1755         imm = get_field(s, i4);
1756     } else {
1757         imm = 0;
1758         o->out = get_address(s, 0, get_field(s, b4),
1759                              get_field(s, d4));
1760     }
1761 
1762     return help_branch(s, &c, is_imm, imm, o->out);
1763 }
1764 
1765 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1766 {
1767     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1768     set_cc_static(s);
1769     return DISAS_NEXT;
1770 }
1771 
1772 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1773 {
1774     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1775     set_cc_static(s);
1776     return DISAS_NEXT;
1777 }
1778 
1779 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1780 {
1781     gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1782     set_cc_static(s);
1783     return DISAS_NEXT;
1784 }
1785 
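/*
 * Pack the m3 (rounding mode) and m4 fields of a floating-point
 * instruction into one i32, with m4 in bits 4-7: e.g. m3 = 5, m4 = 1
 * yields 0x15.  Without the floating-point-extension facility, the
 * fields that facility introduced are forced to zero.  An invalid
 * rounding mode raises a specification exception and returns NULL,
 * which every caller must check for.
 */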
1786 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1787                                    bool m4_with_fpe)
1788 {
1789     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1790     uint8_t m3 = get_field(s, m3);
1791     uint8_t m4 = get_field(s, m4);
1792 
1793     /* m3 field was introduced with FPE */
1794     if (!fpe && m3_with_fpe) {
1795         m3 = 0;
1796     }
1797     /* m4 field was introduced with FPE */
1798     if (!fpe && m4_with_fpe) {
1799         m4 = 0;
1800     }
1801 
1802     /* Check for valid rounding modes. Mode 3 was introduced with FPE. */
1803     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1804         gen_program_exception(s, PGM_SPECIFICATION);
1805         return NULL;
1806     }
1807 
1808     return tcg_const_i32(deposit32(m3, 4, 4, m4));
1809 }
1810 
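/*
 * The CONVERT handlers below all share one shape: validate and pack
 * the rounding-mode fields, call the helper, and free the packed
 * constant.  The to-fixed forms (cf*, cg*, clf*, clg*) additionally
 * latch CC, which reports special cases such as out-of-range sources;
 * the from-fixed forms leave CC alone.
 */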
1811 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1812 {
1813     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1814 
1815     if (!m34) {
1816         return DISAS_NORETURN;
1817     }
1818     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1819     tcg_temp_free_i32(m34);
1820     set_cc_static(s);
1821     return DISAS_NEXT;
1822 }
1823 
1824 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1825 {
1826     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1827 
1828     if (!m34) {
1829         return DISAS_NORETURN;
1830     }
1831     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1832     tcg_temp_free_i32(m34);
1833     set_cc_static(s);
1834     return DISAS_NEXT;
1835 }
1836 
1837 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1838 {
1839     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1840 
1841     if (!m34) {
1842         return DISAS_NORETURN;
1843     }
1844     gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
1845     tcg_temp_free_i32(m34);
1846     set_cc_static(s);
1847     return DISAS_NEXT;
1848 }
1849 
1850 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1851 {
1852     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1853 
1854     if (!m34) {
1855         return DISAS_NORETURN;
1856     }
1857     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1858     tcg_temp_free_i32(m34);
1859     set_cc_static(s);
1860     return DISAS_NEXT;
1861 }
1862 
1863 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1864 {
1865     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1866 
1867     if (!m34) {
1868         return DISAS_NORETURN;
1869     }
1870     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1871     tcg_temp_free_i32(m34);
1872     set_cc_static(s);
1873     return DISAS_NEXT;
1874 }
1875 
1876 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1877 {
1878     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1879 
1880     if (!m34) {
1881         return DISAS_NORETURN;
1882     }
1883     gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
1884     tcg_temp_free_i32(m34);
1885     set_cc_static(s);
1886     return DISAS_NEXT;
1887 }
1888 
1889 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1890 {
1891     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1892 
1893     if (!m34) {
1894         return DISAS_NORETURN;
1895     }
1896     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1897     tcg_temp_free_i32(m34);
1898     set_cc_static(s);
1899     return DISAS_NEXT;
1900 }
1901 
1902 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1903 {
1904     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1905 
1906     if (!m34) {
1907         return DISAS_NORETURN;
1908     }
1909     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1910     tcg_temp_free_i32(m34);
1911     set_cc_static(s);
1912     return DISAS_NEXT;
1913 }
1914 
1915 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1916 {
1917     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1918 
1919     if (!m34) {
1920         return DISAS_NORETURN;
1921     }
1922     gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
1923     tcg_temp_free_i32(m34);
1924     set_cc_static(s);
1925     return DISAS_NEXT;
1926 }
1927 
1928 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1929 {
1930     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1931 
1932     if (!m34) {
1933         return DISAS_NORETURN;
1934     }
1935     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1936     tcg_temp_free_i32(m34);
1937     set_cc_static(s);
1938     return DISAS_NEXT;
1939 }
1940 
1941 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1942 {
1943     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1944 
1945     if (!m34) {
1946         return DISAS_NORETURN;
1947     }
1948     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1949     tcg_temp_free_i32(m34);
1950     set_cc_static(s);
1951     return DISAS_NEXT;
1952 }
1953 
1954 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1955 {
1956     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1957 
1958     if (!m34) {
1959         return DISAS_NORETURN;
1960     }
1961     gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
1962     tcg_temp_free_i32(m34);
1963     set_cc_static(s);
1964     return DISAS_NEXT;
1965 }
1966 
1967 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1968 {
1969     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1970 
1971     if (!m34) {
1972         return DISAS_NORETURN;
1973     }
1974     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1975     tcg_temp_free_i32(m34);
1976     return DISAS_NEXT;
1977 }
1978 
1979 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1980 {
1981     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1982 
1983     if (!m34) {
1984         return DISAS_NORETURN;
1985     }
1986     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1987     tcg_temp_free_i32(m34);
1988     return DISAS_NEXT;
1989 }
1990 
1991 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1992 {
1993     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1994 
1995     if (!m34) {
1996         return DISAS_NORETURN;
1997     }
1998     gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
1999     tcg_temp_free_i32(m34);
2000     return_low128(o->out2);
2001     return DISAS_NEXT;
2002 }
2003 
2004 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
2005 {
2006     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2007 
2008     if (!m34) {
2009         return DISAS_NORETURN;
2010     }
2011     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
2012     tcg_temp_free_i32(m34);
2013     return DISAS_NEXT;
2014 }
2015 
2016 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
2017 {
2018     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2019 
2020     if (!m34) {
2021         return DISAS_NORETURN;
2022     }
2023     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
2024     tcg_temp_free_i32(m34);
2025     return DISAS_NEXT;
2026 }
2027 
2028 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
2029 {
2030     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
2031 
2032     if (!m34) {
2033         return DISAS_NORETURN;
2034     }
2035     gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
2036     tcg_temp_free_i32(m34);
2037     return_low128(o->out2);
2038     return DISAS_NEXT;
2039 }
2040 
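/*
 * CHECKSUM: the helper returns the number of bytes it processed in
 * "len", so advance the address in R2 and reduce the length in R2+1 by
 * that amount; the checksum destined for R1 is picked up with
 * return_low128().
 */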
2041 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
2042 {
2043     int r2 = get_field(s, r2);
2044     TCGv_i64 len = tcg_temp_new_i64();
2045 
2046     gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2047     set_cc_static(s);
2048     return_low128(o->out);
2049 
2050     tcg_gen_add_i64(regs[r2], regs[r2], len);
2051     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2052     tcg_temp_free_i64(len);
2053 
2054     return DISAS_NEXT;
2055 }
2056 
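/*
 * COMPARE LOGICAL (character): the l1 field holds the operand length
 * minus one, hence the switch on l + 1.  Power-of-two lengths up to 8
 * are inlined as a pair of loads feeding the CC computation; all other
 * lengths go through the helper.
 */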
2057 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2058 {
2059     int l = get_field(s, l1);
2060     TCGv_i32 vl;
2061 
2062     switch (l + 1) {
2063     case 1:
2064         tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
2065         tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
2066         break;
2067     case 2:
2068         tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
2069         tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
2070         break;
2071     case 4:
2072         tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
2073         tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
2074         break;
2075     case 8:
2076         tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
2077         tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
2078         break;
2079     default:
2080         vl = tcg_const_i32(l);
2081         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2082         tcg_temp_free_i32(vl);
2083         set_cc_static(s);
2084         return DISAS_NEXT;
2085     }
2086     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2087     return DISAS_NEXT;
2088 }
2089 
2090 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2091 {
2092     int r1 = get_field(s, r1);
2093     int r2 = get_field(s, r2);
2094     TCGv_i32 t1, t2;
2095 
2096     /* r1 and r2 must be even.  */
2097     if (r1 & 1 || r2 & 1) {
2098         gen_program_exception(s, PGM_SPECIFICATION);
2099         return DISAS_NORETURN;
2100     }
2101 
2102     t1 = tcg_const_i32(r1);
2103     t2 = tcg_const_i32(r2);
2104     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2105     tcg_temp_free_i32(t1);
2106     tcg_temp_free_i32(t2);
2107     set_cc_static(s);
2108     return DISAS_NEXT;
2109 }
2110 
2111 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2112 {
2113     int r1 = get_field(s, r1);
2114     int r3 = get_field(s, r3);
2115     TCGv_i32 t1, t3;
2116 
2117     /* r1 and r3 must be even.  */
2118     if (r1 & 1 || r3 & 1) {
2119         gen_program_exception(s, PGM_SPECIFICATION);
2120         return DISAS_NORETURN;
2121     }
2122 
2123     t1 = tcg_const_i32(r1);
2124     t3 = tcg_const_i32(r3);
2125     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2126     tcg_temp_free_i32(t1);
2127     tcg_temp_free_i32(t3);
2128     set_cc_static(s);
2129     return DISAS_NEXT;
2130 }
2131 
2132 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2133 {
2134     int r1 = get_field(s, r1);
2135     int r3 = get_field(s, r3);
2136     TCGv_i32 t1, t3;
2137 
2138     /* r1 and r3 must be even.  */
2139     if (r1 & 1 || r3 & 1) {
2140         gen_program_exception(s, PGM_SPECIFICATION);
2141         return DISAS_NORETURN;
2142     }
2143 
2144     t1 = tcg_const_i32(r1);
2145     t3 = tcg_const_i32(r3);
2146     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2147     tcg_temp_free_i32(t1);
2148     tcg_temp_free_i32(t3);
2149     set_cc_static(s);
2150     return DISAS_NEXT;
2151 }
2152 
2153 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2154 {
2155     TCGv_i32 m3 = tcg_const_i32(get_field(s, m3));
2156     TCGv_i32 t1 = tcg_temp_new_i32();
2157     tcg_gen_extrl_i64_i32(t1, o->in1);
2158     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2159     set_cc_static(s);
2160     tcg_temp_free_i32(t1);
2161     tcg_temp_free_i32(m3);
2162     return DISAS_NEXT;
2163 }
2164 
2165 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2166 {
2167     gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2168     set_cc_static(s);
2169     return_low128(o->in2);
2170     return DISAS_NEXT;
2171 }
2172 
2173 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2174 {
2175     TCGv_i64 t = tcg_temp_new_i64();
2176     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2177     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2178     tcg_gen_or_i64(o->out, o->out, t);
2179     tcg_temp_free_i64(t);
2180     return DISAS_NEXT;
2181 }
2182 
2183 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2184 {
2185     int d2 = get_field(s, d2);
2186     int b2 = get_field(s, b2);
2187     TCGv_i64 addr, cc;
2188 
2189     /* Note that in1 = R3 (new value) and
2190        in2 = (zero-extended) R1 (expected value).  */
2191 
2192     addr = get_address(s, 0, b2, d2);
2193     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2194                                get_mem_index(s), s->insn->data | MO_ALIGN);
2195     tcg_temp_free_i64(addr);
2196 
2197     /* Are the memory and expected values (un)equal?  Note that this setcond
2198        produces the output CC value, thus the NE sense of the test.  */
2199     cc = tcg_temp_new_i64();
2200     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2201     tcg_gen_extrl_i64_i32(cc_op, cc);
2202     tcg_temp_free_i64(cc);
2203     set_cc_static(s);
2204 
2205     return DISAS_NEXT;
2206 }
2207 
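/*
 * COMPARE DOUBLE AND SWAP (128-bit): use the plain helper outside of a
 * parallel context, the cmpxchg128 helper when the host supports a
 * 128-bit compare-and-swap, and otherwise punt to exit_atomic, which
 * restarts the instruction with all other CPUs stopped.
 */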
2208 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2209 {
2210     int r1 = get_field(s, r1);
2211     int r3 = get_field(s, r3);
2212     int d2 = get_field(s, d2);
2213     int b2 = get_field(s, b2);
2214     DisasJumpType ret = DISAS_NEXT;
2215     TCGv_i64 addr;
2216     TCGv_i32 t_r1, t_r3;
2217 
2218     /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
2219     addr = get_address(s, 0, b2, d2);
2220     t_r1 = tcg_const_i32(r1);
2221     t_r3 = tcg_const_i32(r3);
2222     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
2223         gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
2224     } else if (HAVE_CMPXCHG128) {
2225         gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
2226     } else {
2227         gen_helper_exit_atomic(cpu_env);
2228         ret = DISAS_NORETURN;
2229     }
2230     tcg_temp_free_i64(addr);
2231     tcg_temp_free_i32(t_r1);
2232     tcg_temp_free_i32(t_r3);
2233 
2234     set_cc_static(s);
2235     return ret;
2236 }
2237 
2238 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2239 {
2240     int r3 = get_field(s, r3);
2241     TCGv_i32 t_r3 = tcg_const_i32(r3);
2242 
2243     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2244         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2245     } else {
2246         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2247     }
2248     tcg_temp_free_i32(t_r3);
2249 
2250     set_cc_static(s);
2251     return DISAS_NEXT;
2252 }
2253 
2254 #ifndef CONFIG_USER_ONLY
2255 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2256 {
2257     MemOp mop = s->insn->data;
2258     TCGv_i64 addr, old, cc;
2259     TCGLabel *lab = gen_new_label();
2260 
2261     /* Note that in1 = R1 (zero-extended expected value),
2262        out = R1 (original reg), out2 = R1+1 (new value).  */
2263 
2264     addr = tcg_temp_new_i64();
2265     old = tcg_temp_new_i64();
2266     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2267     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2268                                get_mem_index(s), mop | MO_ALIGN);
2269     tcg_temp_free_i64(addr);
2270 
2271     /* Are the memory and expected values (un)equal?  */
2272     cc = tcg_temp_new_i64();
2273     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2274     tcg_gen_extrl_i64_i32(cc_op, cc);
2275 
2276     /* Write back the output now, before the branch below, so that
2277        we don't need local temps.  */
2278     if ((mop & MO_SIZE) == MO_32) {
2279         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2280     } else {
2281         tcg_gen_mov_i64(o->out, old);
2282     }
2283     tcg_temp_free_i64(old);
2284 
2285     /* If the comparison was equal, and the LSB of R2 was set,
2286        then we need to flush the TLB (for all cpus).  */
2287     tcg_gen_xori_i64(cc, cc, 1);
2288     tcg_gen_and_i64(cc, cc, o->in2);
2289     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2290     tcg_temp_free_i64(cc);
2291 
2292     gen_helper_purge(cpu_env);
2293     gen_set_label(lab);
2294 
2295     return DISAS_NEXT;
2296 }
2297 #endif
2298 
2299 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2300 {
2301     TCGv_i64 t1 = tcg_temp_new_i64();
2302     TCGv_i32 t2 = tcg_temp_new_i32();
2303     tcg_gen_extrl_i64_i32(t2, o->in1);
2304     gen_helper_cvd(t1, t2);
2305     tcg_temp_free_i32(t2);
2306     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2307     tcg_temp_free_i64(t1);
2308     return DISAS_NEXT;
2309 }
2310 
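/*
 * COMPARE AND TRAP: branch around the trap using the inverted
 * condition, so that gen_trap() is reached exactly when the m3
 * comparison holds.
 */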
2311 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2312 {
2313     int m3 = get_field(s, m3);
2314     TCGLabel *lab = gen_new_label();
2315     TCGCond c;
2316 
2317     c = tcg_invert_cond(ltgt_cond[m3]);
2318     if (s->insn->data) {
2319         c = tcg_unsigned_cond(c);
2320     }
2321     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2322 
2323     /* Trap.  */
2324     gen_trap(s);
2325 
2326     gen_set_label(lab);
2327     return DISAS_NEXT;
2328 }
2329 
2330 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
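/*
 * CONVERT UTF: insn->data names the conversion as a digit pair, with
 * 1 = UTF-8, 2 = UTF-16 and 4 = UTF-32; 21 thus converts UTF-16 to
 * UTF-8.  The m3 well-formedness-checking flag is honoured only when
 * the ETF3-enhancement facility is installed.
 */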
2331 {
2332     int m3 = get_field(s, m3);
2333     int r1 = get_field(s, r1);
2334     int r2 = get_field(s, r2);
2335     TCGv_i32 tr1, tr2, chk;
2336 
2337     /* R1 and R2 must both be even.  */
2338     if ((r1 | r2) & 1) {
2339         gen_program_exception(s, PGM_SPECIFICATION);
2340         return DISAS_NORETURN;
2341     }
2342     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2343         m3 = 0;
2344     }
2345 
2346     tr1 = tcg_const_i32(r1);
2347     tr2 = tcg_const_i32(r2);
2348     chk = tcg_const_i32(m3);
2349 
2350     switch (s->insn->data) {
2351     case 12:
2352         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2353         break;
2354     case 14:
2355         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2356         break;
2357     case 21:
2358         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2359         break;
2360     case 24:
2361         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2362         break;
2363     case 41:
2364         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2365         break;
2366     case 42:
2367         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2368         break;
2369     default:
2370         g_assert_not_reached();
2371     }
2372 
2373     tcg_temp_free_i32(tr1);
2374     tcg_temp_free_i32(tr2);
2375     tcg_temp_free_i32(chk);
2376     set_cc_static(s);
2377     return DISAS_NEXT;
2378 }
2379 
2380 #ifndef CONFIG_USER_ONLY
2381 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2382 {
2383     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
2384     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
2385     TCGv_i32 func_code = tcg_const_i32(get_field(s, i2));
2386 
2387     gen_helper_diag(cpu_env, r1, r3, func_code);
2388 
2389     tcg_temp_free_i32(func_code);
2390     tcg_temp_free_i32(r3);
2391     tcg_temp_free_i32(r1);
2392     return DISAS_NEXT;
2393 }
2394 #endif
2395 
2396 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2397 {
2398     gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2399     return_low128(o->out);
2400     return DISAS_NEXT;
2401 }
2402 
2403 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2404 {
2405     gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2406     return_low128(o->out);
2407     return DISAS_NEXT;
2408 }
2409 
2410 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2411 {
2412     gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2413     return_low128(o->out);
2414     return DISAS_NEXT;
2415 }
2416 
2417 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2418 {
2419     gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2420     return_low128(o->out);
2421     return DISAS_NEXT;
2422 }
2423 
2424 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2425 {
2426     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2427     return DISAS_NEXT;
2428 }
2429 
2430 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2431 {
2432     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2433     return DISAS_NEXT;
2434 }
2435 
2436 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2437 {
2438     gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2439     return_low128(o->out2);
2440     return DISAS_NEXT;
2441 }
2442 
2443 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2444 {
2445     int r2 = get_field(s, r2);
2446     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2447     return DISAS_NEXT;
2448 }
2449 
2450 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2451 {
2452     /* No cache information provided.  */
2453     tcg_gen_movi_i64(o->out, -1);
2454     return DISAS_NEXT;
2455 }
2456 
2457 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2458 {
2459     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2460     return DISAS_NEXT;
2461 }
2462 
2463 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2464 {
2465     int r1 = get_field(s, r1);
2466     int r2 = get_field(s, r2);
2467     TCGv_i64 t = tcg_temp_new_i64();
2468 
2469     /* Note the "subsequently" in the PoO, which implies a defined result
2470        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2471     tcg_gen_shri_i64(t, psw_mask, 32);
2472     store_reg32_i64(r1, t);
2473     if (r2 != 0) {
2474         store_reg32_i64(r2, psw_mask);
2475     }
2476 
2477     tcg_temp_free_i64(t);
2478     return DISAS_NEXT;
2479 }
2480 
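/*
 * EXECUTE: psw.addr and cc_op must be up to date here, since the
 * helper stages the target instruction (modified by R1 unless R1 is 0)
 * for execution as the next translation step, hence the
 * DISAS_PC_CC_UPDATED exit.
 */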
2481 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2482 {
2483     int r1 = get_field(s, r1);
2484     TCGv_i32 ilen;
2485     TCGv_i64 v1;
2486 
2487     /* Nested EXECUTE is not allowed.  */
2488     if (unlikely(s->ex_value)) {
2489         gen_program_exception(s, PGM_EXECUTE);
2490         return DISAS_NORETURN;
2491     }
2492 
2493     update_psw_addr(s);
2494     update_cc_op(s);
2495 
2496     if (r1 == 0) {
2497         v1 = tcg_const_i64(0);
2498     } else {
2499         v1 = regs[r1];
2500     }
2501 
2502     ilen = tcg_const_i32(s->ilen);
2503     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2504     tcg_temp_free_i32(ilen);
2505 
2506     if (r1 == 0) {
2507         tcg_temp_free_i64(v1);
2508     }
2509 
2510     return DISAS_PC_CC_UPDATED;
2511 }
2512 
2513 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2514 {
2515     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2516 
2517     if (!m34) {
2518         return DISAS_NORETURN;
2519     }
2520     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2521     tcg_temp_free_i32(m34);
2522     return DISAS_NEXT;
2523 }
2524 
2525 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2526 {
2527     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2528 
2529     if (!m34) {
2530         return DISAS_NORETURN;
2531     }
2532     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2533     tcg_temp_free_i32(m34);
2534     return DISAS_NEXT;
2535 }
2536 
2537 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2538 {
2539     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2540 
2541     if (!m34) {
2542         return DISAS_NORETURN;
2543     }
2544     gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
2545     return_low128(o->out2);
2546     tcg_temp_free_i32(m34);
2547     return DISAS_NEXT;
2548 }
2549 
2550 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2551 {
2552     /* We'll use the original input for cc computation, since we get to
2553        compare that against 0, which ought to be better than comparing
2554        the real output against 64.  It also lets cc_dst be a convenient
2555        temporary during our computation.  */
2556     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2557 
2558     /* R1 = IN ? CLZ(IN) : 64.  */
2559     tcg_gen_clzi_i64(o->out, o->in2, 64);
2560 
2561     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2562        value by 64, which is undefined.  But since the shift is 64 iff the
2563        input is zero, we still get the correct result after and'ing.  */
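    /* E.g. IN = 0x0040000000000001: R1 = clz = 9 and
       R1+1 = IN & ~(0x8000000000000000 >> 9) = 1.  */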
2564     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2565     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2566     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2567     return DISAS_NEXT;
2568 }
2569 
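/*
 * INSERT CHARACTERS UNDER MASK: a contiguous mask becomes a single
 * load deposited at the proper offset (e.g. m3 = 0x6 is a 16-bit
 * load), while a sparse mask such as 0x5 falls back to a byte-by-byte
 * loop.  "ccm" accumulates a mask of the inserted bits for the CC
 * computation.
 */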
2570 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2571 {
2572     int m3 = get_field(s, m3);
2573     int pos, len, base = s->insn->data;
2574     TCGv_i64 tmp = tcg_temp_new_i64();
2575     uint64_t ccm;
2576 
2577     switch (m3) {
2578     case 0xf:
2579         /* Effectively a 32-bit load.  */
2580         tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2581         len = 32;
2582         goto one_insert;
2583 
2584     case 0xc:
2585     case 0x6:
2586     case 0x3:
2587         /* Effectively a 16-bit load.  */
2588         tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2589         len = 16;
2590         goto one_insert;
2591 
2592     case 0x8:
2593     case 0x4:
2594     case 0x2:
2595     case 0x1:
2596         /* Effectively an 8-bit load.  */
2597         tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2598         len = 8;
2599         goto one_insert;
2600 
2601     one_insert:
2602         pos = base + ctz32(m3) * 8;
2603         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2604         ccm = ((1ull << len) - 1) << pos;
2605         break;
2606 
2607     default:
2608         /* This is going to be a sequence of loads and inserts.  */
2609         pos = base + 32 - 8;
2610         ccm = 0;
2611         while (m3) {
2612             if (m3 & 0x8) {
2613                 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2614                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2615                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2616                 ccm |= 0xffull << pos;
2617             }
2618             m3 = (m3 << 1) & 0xf;
2619             pos -= 8;
2620         }
2621         break;
2622     }
2623 
2624     tcg_gen_movi_i64(tmp, ccm);
2625     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2626     tcg_temp_free_i64(tmp);
2627     return DISAS_NEXT;
2628 }
2629 
2630 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2631 {
2632     int shift = s->insn->data & 0xff;
2633     int size = s->insn->data >> 8;
2634     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2635     return DISAS_NEXT;
2636 }
2637 
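/*
 * INSERT PROGRAM MASK: build the byte (cc << 4) | program_mask and
 * deposit it into bits 24-31 of R1, leaving the other bits intact.
 */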
2638 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2639 {
2640     TCGv_i64 t1, t2;
2641 
2642     gen_op_calc_cc(s);
2643     t1 = tcg_temp_new_i64();
2644     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2645     t2 = tcg_temp_new_i64();
2646     tcg_gen_extu_i32_i64(t2, cc_op);
2647     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2648     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2649     tcg_temp_free_i64(t1);
2650     tcg_temp_free_i64(t2);
2651     return DISAS_NEXT;
2652 }
2653 
2654 #ifndef CONFIG_USER_ONLY
2655 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2656 {
2657     TCGv_i32 m4;
2658 
2659     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2660         m4 = tcg_const_i32(get_field(s, m4));
2661     } else {
2662         m4 = tcg_const_i32(0);
2663     }
2664     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2665     tcg_temp_free_i32(m4);
2666     return DISAS_NEXT;
2667 }
2668 
2669 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2670 {
2671     TCGv_i32 m4;
2672 
2673     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2674         m4 = tcg_const_i32(get_field(s, m4));
2675     } else {
2676         m4 = tcg_const_i32(0);
2677     }
2678     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2679     tcg_temp_free_i32(m4);
2680     return DISAS_NEXT;
2681 }
2682 
2683 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2684 {
2685     gen_helper_iske(o->out, cpu_env, o->in2);
2686     return DISAS_NEXT;
2687 }
2688 #endif
2689 
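/*
 * Dispatcher for the message-security-assist instructions.  The
 * cascaded fall-through cases enforce the register constraints that
 * each facility type adds on top of the previous ones: KMA requires R3
 * distinct from both R1 and R2, the cipher types require nonzero even
 * R1 and/or R2, and so on.
 */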
2690 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2691 {
2692     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2693     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2694     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2695     TCGv_i32 t_r1, t_r2, t_r3, type;
2696 
2697     switch (s->insn->data) {
2698     case S390_FEAT_TYPE_KMA:
2699         if (r3 == r1 || r3 == r2) {
2700             gen_program_exception(s, PGM_SPECIFICATION);
2701             return DISAS_NORETURN;
2702         }
2703         /* FALL THROUGH */
2704     case S390_FEAT_TYPE_KMCTR:
2705         if (r3 & 1 || !r3) {
2706             gen_program_exception(s, PGM_SPECIFICATION);
2707             return DISAS_NORETURN;
2708         }
2709         /* FALL THROUGH */
2710     case S390_FEAT_TYPE_PPNO:
2711     case S390_FEAT_TYPE_KMF:
2712     case S390_FEAT_TYPE_KMC:
2713     case S390_FEAT_TYPE_KMO:
2714     case S390_FEAT_TYPE_KM:
2715         if (r1 & 1 || !r1) {
2716             gen_program_exception(s, PGM_SPECIFICATION);
2717             return DISAS_NORETURN;
2718         }
2719         /* FALL THROUGH */
2720     case S390_FEAT_TYPE_KMAC:
2721     case S390_FEAT_TYPE_KIMD:
2722     case S390_FEAT_TYPE_KLMD:
2723         if (r2 & 1 || !r2) {
2724             gen_program_exception(s, PGM_SPECIFICATION);
2725             return DISAS_NORETURN;
2726         }
2727         /* FALL THROUGH */
2728     case S390_FEAT_TYPE_PCKMO:
2729     case S390_FEAT_TYPE_PCC:
2730         break;
2731     default:
2732         g_assert_not_reached();
2733     }
2734 
2735     t_r1 = tcg_const_i32(r1);
2736     t_r2 = tcg_const_i32(r2);
2737     t_r3 = tcg_const_i32(r3);
2738     type = tcg_const_i32(s->insn->data);
2739     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2740     set_cc_static(s);
2741     tcg_temp_free_i32(t_r1);
2742     tcg_temp_free_i32(t_r2);
2743     tcg_temp_free_i32(t_r3);
2744     tcg_temp_free_i32(type);
2745     return DISAS_NEXT;
2746 }
2747 
2748 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2749 {
2750     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2751     set_cc_static(s);
2752     return DISAS_NEXT;
2753 }
2754 
2755 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2756 {
2757     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2758     set_cc_static(s);
2759     return DISAS_NEXT;
2760 }
2761 
2762 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2763 {
2764     gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2765     set_cc_static(s);
2766     return DISAS_NEXT;
2767 }
2768 
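/*
 * The LOAD AND {ADD,AND,OR,XOR} group: the interlocked fetch-and-op
 * updates memory and yields the old memory value, which is the
 * instruction's register result; the operation is then redone
 * non-atomically purely to feed the CC computation.
 */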
2769 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2770 {
2771     /* The real output is indeed the original value in memory,
2772        i.e. the value fetched before the interlocked add.  */
2773     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2774                                  s->insn->data | MO_ALIGN);
2775     /* However, we need to recompute the addition for setting CC.  */
2776     tcg_gen_add_i64(o->out, o->in1, o->in2);
2777     return DISAS_NEXT;
2778 }
2779 
2780 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2781 {
2782     /* The real output is indeed the original value in memory,
2783        i.e. the value fetched before the interlocked AND.  */
2784     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2785                                  s->insn->data | MO_ALIGN);
2786     /* However, we need to recompute the operation for setting CC.  */
2787     tcg_gen_and_i64(o->out, o->in1, o->in2);
2788     return DISAS_NEXT;
2789 }
2790 
2791 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2792 {
2793     /* The real output is indeed the original value in memory,
2794        i.e. the value fetched before the interlocked OR.  */
2795     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2796                                 s->insn->data | MO_ALIGN);
2797     /* However, we need to recompute the operation for setting CC.  */
2798     tcg_gen_or_i64(o->out, o->in1, o->in2);
2799     return DISAS_NEXT;
2800 }
2801 
2802 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2803 {
2804     /* The real output is indeed the original value in memory,
2805        i.e. the value fetched before the interlocked XOR.  */
2806     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2807                                  s->insn->data | MO_ALIGN);
2808     /* However, we need to recompute the operation for setting CC.  */
2809     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2810     return DISAS_NEXT;
2811 }
2812 
2813 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2814 {
2815     gen_helper_ldeb(o->out, cpu_env, o->in2);
2816     return DISAS_NEXT;
2817 }
2818 
2819 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2820 {
2821     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2822 
2823     if (!m34) {
2824         return DISAS_NORETURN;
2825     }
2826     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2827     tcg_temp_free_i32(m34);
2828     return DISAS_NEXT;
2829 }
2830 
2831 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2832 {
2833     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2834 
2835     if (!m34) {
2836         return DISAS_NORETURN;
2837     }
2838     gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
2839     tcg_temp_free_i32(m34);
2840     return DISAS_NEXT;
2841 }
2842 
2843 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2844 {
2845     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2846 
2847     if (!m34) {
2848         return DISAS_NORETURN;
2849     }
2850     gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
2851     tcg_temp_free_i32(m34);
2852     return DISAS_NEXT;
2853 }
2854 
2855 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2856 {
2857     gen_helper_lxdb(o->out, cpu_env, o->in2);
2858     return_low128(o->out2);
2859     return DISAS_NEXT;
2860 }
2861 
2862 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2863 {
2864     gen_helper_lxeb(o->out, cpu_env, o->in2);
2865     return_low128(o->out2);
2866     return DISAS_NEXT;
2867 }
2868 
2869 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2870 {
2871     tcg_gen_shli_i64(o->out, o->in2, 32);
2872     return DISAS_NEXT;
2873 }
2874 
2875 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2876 {
2877     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2878     return DISAS_NEXT;
2879 }
2880 
2881 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2882 {
2883     tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2884     return DISAS_NEXT;
2885 }
2886 
2887 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2888 {
2889     tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2890     return DISAS_NEXT;
2891 }
2892 
2893 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2894 {
2895     tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2896     return DISAS_NEXT;
2897 }
2898 
2899 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2900 {
2901     tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2902     return DISAS_NEXT;
2903 }
2904 
2905 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2906 {
2907     tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2908     return DISAS_NEXT;
2909 }
2910 
2911 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2912 {
2913     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2914     return DISAS_NEXT;
2915 }
2916 
2917 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2918 {
2919     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2920     return DISAS_NEXT;
2921 }
2922 
2923 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2924 {
2925     TCGLabel *lab = gen_new_label();
2926     store_reg32_i64(get_field(s, r1), o->in2);
2927     /* The value is stored even in the case of a trap. */
2928     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2929     gen_trap(s);
2930     gen_set_label(lab);
2931     return DISAS_NEXT;
2932 }
2933 
2934 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2935 {
2936     TCGLabel *lab = gen_new_label();
2937     tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2938     /* The value is stored even in the case of a trap. */
2939     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2940     gen_trap(s);
2941     gen_set_label(lab);
2942     return DISAS_NEXT;
2943 }
2944 
2945 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2946 {
2947     TCGLabel *lab = gen_new_label();
2948     store_reg32h_i64(get_field(s, r1), o->in2);
2949     /* The value is stored even in the case of a trap. */
2950     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2951     gen_trap(s);
2952     gen_set_label(lab);
2953     return DISAS_NEXT;
2954 }
2955 
2956 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2957 {
2958     TCGLabel *lab = gen_new_label();
2959     tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2960     /* The value is stored even in the case of a trap. */
2961     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2962     gen_trap(s);
2963     gen_set_label(lab);
2964     return DISAS_NEXT;
2965 }
2966 
2967 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2968 {
2969     TCGLabel *lab = gen_new_label();
2970     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2971     /* The value is stored even in the case of a trap. */
2972     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2973     gen_trap(s);
2974     gen_set_label(lab);
2975     return DISAS_NEXT;
2976 }
2977 
2978 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
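/*
 * LOAD ON CONDITION and SELECT share this implementation: the
 * condition becomes a movcond.  A 32-bit comparison is first reduced
 * to a setcond and widened, since the values being selected are 64
 * bits.
 */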
2979 {
2980     DisasCompare c;
2981 
2982     if (have_field(s, m3)) {
2983         /* LOAD * ON CONDITION */
2984         disas_jcc(s, &c, get_field(s, m3));
2985     } else {
2986         /* SELECT */
2987         disas_jcc(s, &c, get_field(s, m4));
2988     }
2989 
2990     if (c.is_64) {
2991         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2992                             o->in2, o->in1);
2993         free_compare(&c);
2994     } else {
2995         TCGv_i32 t32 = tcg_temp_new_i32();
2996         TCGv_i64 t, z;
2997 
2998         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2999         free_compare(&c);
3000 
3001         t = tcg_temp_new_i64();
3002         tcg_gen_extu_i32_i64(t, t32);
3003         tcg_temp_free_i32(t32);
3004 
3005         z = tcg_const_i64(0);
3006         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
3007         tcg_temp_free_i64(t);
3008         tcg_temp_free_i64(z);
3009     }
3010 
3011     return DISAS_NEXT;
3012 }
3013 
3014 #ifndef CONFIG_USER_ONLY
3015 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
3016 {
3017     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3018     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3019     gen_helper_lctl(cpu_env, r1, o->in2, r3);
3020     tcg_temp_free_i32(r1);
3021     tcg_temp_free_i32(r3);
3022     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3023     s->exit_to_mainloop = true;
3024     return DISAS_TOO_MANY;
3025 }
3026 
3027 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
3028 {
3029     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3030     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3031     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
3032     tcg_temp_free_i32(r1);
3033     tcg_temp_free_i32(r3);
3034     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
3035     s->exit_to_mainloop = true;
3036     return DISAS_TOO_MANY;
3037 }
3038 
3039 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
3040 {
3041     gen_helper_lra(o->out, cpu_env, o->in2);
3042     set_cc_static(s);
3043     return DISAS_NEXT;
3044 }
3045 
3046 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
3047 {
3048     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
3049     return DISAS_NEXT;
3050 }
3051 
3052 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
3053 {
3054     TCGv_i64 t1, t2;
3055 
3056     per_breaking_event(s);
3057 
3058     t1 = tcg_temp_new_i64();
3059     t2 = tcg_temp_new_i64();
3060     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3061                         MO_TEUL | MO_ALIGN_8);
3062     tcg_gen_addi_i64(o->in2, o->in2, 4);
3063     tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
3064     /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
3065     tcg_gen_shli_i64(t1, t1, 32);
3066     gen_helper_load_psw(cpu_env, t1, t2);
3067     tcg_temp_free_i64(t1);
3068     tcg_temp_free_i64(t2);
3069     return DISAS_NORETURN;
3070 }
3071 
3072 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
3073 {
3074     TCGv_i64 t1, t2;
3075 
3076     per_breaking_event(s);
3077 
3078     t1 = tcg_temp_new_i64();
3079     t2 = tcg_temp_new_i64();
3080     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
3081                         MO_TEUQ | MO_ALIGN_8);
3082     tcg_gen_addi_i64(o->in2, o->in2, 8);
3083     tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
3084     gen_helper_load_psw(cpu_env, t1, t2);
3085     tcg_temp_free_i64(t1);
3086     tcg_temp_free_i64(t2);
3087     return DISAS_NORETURN;
3088 }
3089 #endif
3090 
3091 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
3092 {
3093     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
3094     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
3095     gen_helper_lam(cpu_env, r1, o->in2, r3);
3096     tcg_temp_free_i32(r1);
3097     tcg_temp_free_i32(r3);
3098     return DISAS_NEXT;
3099 }
3100 
3101 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
3102 {
3103     int r1 = get_field(s, r1);
3104     int r3 = get_field(s, r3);
3105     TCGv_i64 t1, t2;
3106 
3107     /* Only one register to read. */
3108     t1 = tcg_temp_new_i64();
3109     if (unlikely(r1 == r3)) {
3110         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3111         store_reg32_i64(r1, t1);
3112         tcg_temp_free(t1);
3113         return DISAS_NEXT;
3114     }
3115 
3116     /* First load the values of the first and last registers to trigger
3117        possible page faults. */
3118     t2 = tcg_temp_new_i64();
3119     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3120     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3121     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3122     store_reg32_i64(r1, t1);
3123     store_reg32_i64(r3, t2);
3124 
3125     /* Only two registers to read. */
3126     if (((r1 + 1) & 15) == r3) {
3127         tcg_temp_free(t2);
3128         tcg_temp_free(t1);
3129         return DISAS_NEXT;
3130     }
3131 
3132     /* Then load the remaining registers. Page faults can no longer occur. */
3133     r3 = (r3 - 1) & 15;
3134     tcg_gen_movi_i64(t2, 4);
3135     while (r1 != r3) {
3136         r1 = (r1 + 1) & 15;
3137         tcg_gen_add_i64(o->in2, o->in2, t2);
3138         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3139         store_reg32_i64(r1, t1);
3140     }
3141     tcg_temp_free(t2);
3142     tcg_temp_free(t1);
3143 
3144     return DISAS_NEXT;
3145 }
3146 
3147 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3148 {
3149     int r1 = get_field(s, r1);
3150     int r3 = get_field(s, r3);
3151     TCGv_i64 t1, t2;
3152 
3153     /* Only one register to read. */
3154     t1 = tcg_temp_new_i64();
3155     if (unlikely(r1 == r3)) {
3156         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3157         store_reg32h_i64(r1, t1);
3158         tcg_temp_free(t1);
3159         return DISAS_NEXT;
3160     }
3161 
3162     /* First load the values of the first and last registers to trigger
3163        possible page faults. */
3164     t2 = tcg_temp_new_i64();
3165     tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3166     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3167     tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
3168     store_reg32h_i64(r1, t1);
3169     store_reg32h_i64(r3, t2);
3170 
3171     /* Only two registers to read. */
3172     if (((r1 + 1) & 15) == r3) {
3173         tcg_temp_free(t2);
3174         tcg_temp_free(t1);
3175         return DISAS_NEXT;
3176     }
3177 
3178     /* Then load the remaining registers. Page faults can no longer occur. */
3179     r3 = (r3 - 1) & 15;
3180     tcg_gen_movi_i64(t2, 4);
3181     while (r1 != r3) {
3182         r1 = (r1 + 1) & 15;
3183         tcg_gen_add_i64(o->in2, o->in2, t2);
3184         tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
3185         store_reg32h_i64(r1, t1);
3186     }
3187     tcg_temp_free(t2);
3188     tcg_temp_free(t1);
3189 
3190     return DISAS_NEXT;
3191 }
3192 
3193 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3194 {
3195     int r1 = get_field(s, r1);
3196     int r3 = get_field(s, r3);
3197     TCGv_i64 t1, t2;
3198 
3199     /* Only one register to read. */
3200     if (unlikely(r1 == r3)) {
3201         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3202         return DISAS_NEXT;
3203     }
3204 
3205     /* First load the values of the first and last registers to trigger
3206        possible page faults. */
3207     t1 = tcg_temp_new_i64();
3208     t2 = tcg_temp_new_i64();
3209     tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
3210     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3211     tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
3212     tcg_gen_mov_i64(regs[r1], t1);
3213     tcg_temp_free(t2);
3214 
3215     /* Only two registers to read. */
3216     if (((r1 + 1) & 15) == r3) {
3217         tcg_temp_free(t1);
3218         return DISAS_NEXT;
3219     }
3220 
3221     /* Then load the remaining registers. Page faults can no longer occur. */
3222     r3 = (r3 - 1) & 15;
3223     tcg_gen_movi_i64(t1, 8);
3224     while (r1 != r3) {
3225         r1 = (r1 + 1) & 15;
3226         tcg_gen_add_i64(o->in2, o->in2, t1);
3227         tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
3228     }
3229     tcg_temp_free(t1);
3230 
3231     return DISAS_NEXT;
3232 }
3233 
3234 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3235 {
3236     TCGv_i64 a1, a2;
3237     MemOp mop = s->insn->data;
3238 
3239     /* In a parallel context, stop the world and single step.  */
3240     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3241         update_psw_addr(s);
3242         update_cc_op(s);
3243         gen_exception(EXCP_ATOMIC);
3244         return DISAS_NORETURN;
3245     }
3246 
3247     /* In a serial context, perform the two loads ... */
3248     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3249     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3250     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3251     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3252     tcg_temp_free_i64(a1);
3253     tcg_temp_free_i64(a2);
3254 
3255     /* ... and indicate that we performed them while interlocked.  */
3256     gen_op_movi_cc(s, 0);
3257     return DISAS_NEXT;
3258 }
3259 
3260 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3261 {
3262     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3263         gen_helper_lpq(o->out, cpu_env, o->in2);
3264     } else if (HAVE_ATOMIC128) {
3265         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3266     } else {
3267         gen_helper_exit_atomic(cpu_env);
3268         return DISAS_NORETURN;
3269     }
3270     return_low128(o->out2);
3271     return DISAS_NEXT;
3272 }
3273 
3274 #ifndef CONFIG_USER_ONLY
3275 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3276 {
3277     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3278     return DISAS_NEXT;
3279 }
3280 #endif
3281 
3282 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3283 {
3284     tcg_gen_andi_i64(o->out, o->in2, -256);
3285     return DISAS_NEXT;
3286 }
3287 
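/*
 * LOAD COUNT TO BLOCK BOUNDARY: with block_size a power of two, the
 * OR with -block_size followed by negation computes
 * block_size - (addr % block_size), which is then capped at 16.
 * E.g. for a 64-byte block and addr % 64 == 60, the result is 4.
 */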
3288 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3289 {
3290     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3291 
3292     if (get_field(s, m3) > 6) {
3293         gen_program_exception(s, PGM_SPECIFICATION);
3294         return DISAS_NORETURN;
3295     }
3296 
3297     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3298     tcg_gen_neg_i64(o->addr1, o->addr1);
3299     tcg_gen_movi_i64(o->out, 16);
3300     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3301     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3302     return DISAS_NEXT;
3303 }
3304 
3305 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3306 {
3307 #if !defined(CONFIG_USER_ONLY)
3308     TCGv_i32 i2;
3309 #endif
3310     const uint16_t monitor_class = get_field(s, i2);
3311 
3312     if (monitor_class & 0xff00) {
3313         gen_program_exception(s, PGM_SPECIFICATION);
3314         return DISAS_NORETURN;
3315     }
3316 
3317 #if !defined(CONFIG_USER_ONLY)
3318     i2 = tcg_const_i32(monitor_class);
3319     gen_helper_monitor_call(cpu_env, o->addr1, i2);
3320     tcg_temp_free_i32(i2);
3321 #endif
3322     /* Defaults to a NOP. */
3323     return DISAS_NEXT;
3324 }
3325 
3326 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3327 {
3328     o->out = o->in2;
3329     o->g_out = o->g_in2;
3330     o->in2 = NULL;
3331     o->g_in2 = false;
3332     return DISAS_NEXT;
3333 }
3334 
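/*
 * Like op_mov2 above, but additionally prime access register 1
 * according to the current address-space control: 0 in primary mode,
 * 1 in access-register mode, 2 in home mode, and a copy of the B2
 * access register (or 0 when B2 is 0) in secondary mode.
 */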
3335 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3336 {
3337     int b2 = get_field(s, b2);
3338     TCGv ar1 = tcg_temp_new_i64();
3339 
3340     o->out = o->in2;
3341     o->g_out = o->g_in2;
3342     o->in2 = NULL;
3343     o->g_in2 = false;
3344 
3345     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3346     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3347         tcg_gen_movi_i64(ar1, 0);
3348         break;
3349     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3350         tcg_gen_movi_i64(ar1, 1);
3351         break;
3352     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3353         if (b2) {
3354             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3355         } else {
3356             tcg_gen_movi_i64(ar1, 0);
3357         }
3358         break;
3359     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3360         tcg_gen_movi_i64(ar1, 2);
3361         break;
3362     }
3363 
3364     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3365     tcg_temp_free_i64(ar1);
3366 
3367     return DISAS_NEXT;
3368 }
3369 
3370 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3371 {
3372     o->out = o->in1;
3373     o->out2 = o->in2;
3374     o->g_out = o->g_in1;
3375     o->g_out2 = o->g_in2;
3376     o->in1 = NULL;
3377     o->in2 = NULL;
3378     o->g_in1 = o->g_in2 = false;
3379     return DISAS_NEXT;
3380 }
3381 
3382 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3383 {
3384     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3385     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3386     tcg_temp_free_i32(l);
3387     return DISAS_NEXT;
3388 }
3389 
3390 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3391 {
3392     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3393     return DISAS_NEXT;
3394 }
3395 
3396 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3397 {
3398     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3399     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3400     tcg_temp_free_i32(l);
3401     return DISAS_NEXT;
3402 }
3403 
3404 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3405 {
3406     int r1 = get_field(s, r1);
3407     int r2 = get_field(s, r2);
3408     TCGv_i32 t1, t2;
3409 
3410     /* r1 and r2 must be even.  */
3411     if (r1 & 1 || r2 & 1) {
3412         gen_program_exception(s, PGM_SPECIFICATION);
3413         return DISAS_NORETURN;
3414     }
3415 
3416     t1 = tcg_const_i32(r1);
3417     t2 = tcg_const_i32(r2);
3418     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3419     tcg_temp_free_i32(t1);
3420     tcg_temp_free_i32(t2);
3421     set_cc_static(s);
3422     return DISAS_NEXT;
3423 }
3424 
3425 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3426 {
3427     int r1 = get_field(s, r1);
3428     int r3 = get_field(s, r3);
3429     TCGv_i32 t1, t3;
3430 
3431     /* r1 and r3 must be even.  */
3432     if (r1 & 1 || r3 & 1) {
3433         gen_program_exception(s, PGM_SPECIFICATION);
3434         return DISAS_NORETURN;
3435     }
3436 
3437     t1 = tcg_const_i32(r1);
3438     t3 = tcg_const_i32(r3);
3439     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3440     tcg_temp_free_i32(t1);
3441     tcg_temp_free_i32(t3);
3442     set_cc_static(s);
3443     return DISAS_NEXT;
3444 }
3445 
3446 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3447 {
3448     int r1 = get_field(s, r1);
3449     int r3 = get_field(s, r3);
3450     TCGv_i32 t1, t3;
3451 
3452     /* r1 and r3 must be even.  */
3453     if (r1 & 1 || r3 & 1) {
3454         gen_program_exception(s, PGM_SPECIFICATION);
3455         return DISAS_NORETURN;
3456     }
3457 
3458     t1 = tcg_const_i32(r1);
3459     t3 = tcg_const_i32(r3);
3460     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3461     tcg_temp_free_i32(t1);
3462     tcg_temp_free_i32(t3);
3463     set_cc_static(s);
3464     return DISAS_NEXT;
3465 }
3466 
3467 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3468 {
3469     int r3 = get_field(s, r3);
3470     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3471     set_cc_static(s);
3472     return DISAS_NEXT;
3473 }
3474 
3475 #ifndef CONFIG_USER_ONLY
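/*
 * MOVE TO PRIMARY/SECONDARY: in this format the first register number
 * is encoded where an l1 length field would normally sit, hence
 * get_field(s, l1) below; that register holds the true operand length.
 */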
3476 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3477 {
3478     int r1 = get_field(s, l1);
3479     int r3 = get_field(s, r3);
3480     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3481     set_cc_static(s);
3482     return DISAS_NEXT;
3483 }
3484 
3485 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3486 {
3487     int r1 = get_field(s, l1);
3488     int r3 = get_field(s, r3);
3489     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3490     set_cc_static(s);
3491     return DISAS_NEXT;
3492 }
3493 #endif
3494 
3495 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3496 {
3497     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3498     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3499     tcg_temp_free_i32(l);
3500     return DISAS_NEXT;
3501 }
3502 
3503 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3504 {
3505     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3506     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3507     tcg_temp_free_i32(l);
3508     return DISAS_NEXT;
3509 }
3510 
3511 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3512 {
3513     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3514     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3515 
3516     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3517     tcg_temp_free_i32(t1);
3518     tcg_temp_free_i32(t2);
3519     set_cc_static(s);
3520     return DISAS_NEXT;
3521 }
3522 
3523 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3524 {
3525     TCGv_i32 t1 = tcg_const_i32(get_field(s, r1));
3526     TCGv_i32 t2 = tcg_const_i32(get_field(s, r2));
3527 
3528     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3529     tcg_temp_free_i32(t1);
3530     tcg_temp_free_i32(t2);
3531     set_cc_static(s);
3532     return DISAS_NEXT;
3533 }
3534 
3535 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3536 {
3537     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3538     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3539     tcg_temp_free_i32(l);
3540     return DISAS_NEXT;
3541 }
3542 
3543 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3544 {
3545     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3546     return DISAS_NEXT;
3547 }
3548 
3549 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3550 {
3551     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3552     return DISAS_NEXT;
3553 }
3554 
3555 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3556 {
3557     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3558     return DISAS_NEXT;
3559 }
3560 
3561 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3562 {
3563     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3564     return DISAS_NEXT;
3565 }
3566 
3567 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3568 {
3569     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3570     return DISAS_NEXT;
3571 }
3572 
3573 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3574 {
3575     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3576     return DISAS_NEXT;
3577 }
3578 
3579 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3580 {
3581     gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3582     return_low128(o->out2);
3583     return DISAS_NEXT;
3584 }
3585 
3586 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3587 {
3588     gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3589     return_low128(o->out2);
3590     return DISAS_NEXT;
3591 }
3592 
3593 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3594 {
3595     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3596     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3597     tcg_temp_free_i64(r3);
3598     return DISAS_NEXT;
3599 }
3600 
3601 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3602 {
3603     TCGv_i64 r3 = load_freg(get_field(s, r3));
3604     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3605     tcg_temp_free_i64(r3);
3606     return DISAS_NEXT;
3607 }
3608 
3609 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3610 {
3611     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3612     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3613     tcg_temp_free_i64(r3);
3614     return DISAS_NEXT;
3615 }
3616 
3617 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3618 {
3619     TCGv_i64 r3 = load_freg(get_field(s, r3));
3620     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3621     tcg_temp_free_i64(r3);
3622     return DISAS_NEXT;
3623 }
3624 
3625 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3626 {
3627     TCGv_i64 z, n;
3628     z = tcg_const_i64(0);
3629     n = tcg_temp_new_i64();
3630     tcg_gen_neg_i64(n, o->in2);
3631     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3632     tcg_temp_free_i64(n);
3633     tcg_temp_free_i64(z);
3634     return DISAS_NEXT;
3635 }
3636 
3637 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3638 {
3639     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3640     return DISAS_NEXT;
3641 }
3642 
3643 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3644 {
3645     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3646     return DISAS_NEXT;
3647 }
3648 
3649 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3650 {
3651     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3652     tcg_gen_mov_i64(o->out2, o->in2);
3653     return DISAS_NEXT;
3654 }
3655 
3656 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3657 {
3658     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3659     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3660     tcg_temp_free_i32(l);
3661     set_cc_static(s);
3662     return DISAS_NEXT;
3663 }
3664 
3665 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3666 {
3667     tcg_gen_neg_i64(o->out, o->in2);
3668     return DISAS_NEXT;
3669 }
3670 
3671 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3672 {
3673     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3674     return DISAS_NEXT;
3675 }
3676 
3677 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3678 {
3679     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3680     return DISAS_NEXT;
3681 }
3682 
3683 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3684 {
3685     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3686     tcg_gen_mov_i64(o->out2, o->in2);
3687     return DISAS_NEXT;
3688 }
3689 
3690 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3691 {
3692     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3693     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3694     tcg_temp_free_i32(l);
3695     set_cc_static(s);
3696     return DISAS_NEXT;
3697 }
3698 
3699 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3700 {
3701     tcg_gen_or_i64(o->out, o->in1, o->in2);
3702     return DISAS_NEXT;
3703 }
3704 
3705 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3706 {
3707     int shift = s->insn->data & 0xff;
3708     int size = s->insn->data >> 8;
3709     uint64_t mask = ((1ull << size) - 1) << shift;
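    /*
     * Illustrative example (hypothetical operands): size 16 and shift 48
     * select the leftmost halfword, i.e. mask == 0xffff000000000000ull;
     * only those bits feed the CC computation below.
     */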
3710 
3711     assert(!o->g_in2);
3712     tcg_gen_shli_i64(o->in2, o->in2, shift);
3713     tcg_gen_or_i64(o->out, o->in1, o->in2);
3714 
3715     /* Produce the CC from only the bits manipulated.  */
3716     tcg_gen_andi_i64(cc_dst, o->out, mask);
3717     set_cc_nz_u64(s, cc_dst);
3718     return DISAS_NEXT;
3719 }
3720 
3721 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3722 {
3723     o->in1 = tcg_temp_new_i64();
3724 
3725     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3726         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3727     } else {
3728         /* Perform the atomic operation in memory. */
3729         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3730                                     s->insn->data);
3731     }
3732 
3733     /* Recompute for the atomic case as well: needed for setting CC. */
3734     tcg_gen_or_i64(o->out, o->in1, o->in2);
3735 
3736     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3737         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3738     }
3739     return DISAS_NEXT;
3740 }
3741 
3742 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3743 {
3744     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
3745     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3746     tcg_temp_free_i32(l);
3747     return DISAS_NEXT;
3748 }
3749 
3750 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3751 {
3752     int l2 = get_field(s, l2) + 1;
3753     TCGv_i32 l;
3754 
3755     /* The length must not exceed 32 bytes.  */
3756     if (l2 > 32) {
3757         gen_program_exception(s, PGM_SPECIFICATION);
3758         return DISAS_NORETURN;
3759     }
3760     l = tcg_const_i32(l2);
3761     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3762     tcg_temp_free_i32(l);
3763     return DISAS_NEXT;
3764 }
3765 
3766 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3767 {
3768     int l2 = get_field(s, l2) + 1;
3769     TCGv_i32 l;
3770 
3771     /* The length must be even and must not exceed 64 bytes.  */
3772     if ((l2 & 1) || (l2 > 64)) {
3773         gen_program_exception(s, PGM_SPECIFICATION);
3774         return DISAS_NORETURN;
3775     }
3776     l = tcg_const_i32(l2);
3777     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3778     tcg_temp_free_i32(l);
3779     return DISAS_NEXT;
3780 }
3781 
3782 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3783 {
3784     const uint8_t m3 = get_field(s, m3);
3785 
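    /* With m3 bit 0 set (and the misc-instruction-extensions-3 facility),
       a single 64-bit total is requested; the base form instead counts
       the bits of each byte separately, hence the helper. */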
3786     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3787         tcg_gen_ctpop_i64(o->out, o->in2);
3788     } else {
3789         gen_helper_popcnt(o->out, o->in2);
3790     }
3791     return DISAS_NEXT;
3792 }
3793 
3794 #ifndef CONFIG_USER_ONLY
3795 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3796 {
3797     gen_helper_ptlb(cpu_env);
3798     return DISAS_NEXT;
3799 }
3800 #endif
3801 
3802 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3803 {
3804     int i3 = get_field(s, i3);
3805     int i4 = get_field(s, i4);
3806     int i5 = get_field(s, i5);
3807     int do_zero = i4 & 0x80;
3808     uint64_t mask, imask, pmask;
3809     int pos, len, rot;
3810 
3811     /* Adjust the arguments for the specific insn.  */
3812     switch (s->fields.op2) {
3813     case 0x55: /* risbg */
3814     case 0x59: /* risbgn */
3815         i3 &= 63;
3816         i4 &= 63;
3817         pmask = ~0;
3818         break;
3819     case 0x5d: /* risbhg */
3820         i3 &= 31;
3821         i4 &= 31;
3822         pmask = 0xffffffff00000000ull;
3823         break;
3824     case 0x51: /* risblg */
3825         i3 = (i3 & 31) + 32;
3826         i4 = (i4 & 31) + 32;
3827         pmask = 0x00000000ffffffffull;
3828         break;
3829     default:
3830         g_assert_not_reached();
3831     }
3832 
3833     /* MASK is the set of bits to be inserted from R2. */
3834     if (i3 <= i4) {
3835         /* [0...i3---i4...63] */
3836         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3837     } else {
3838         /* [0---i4...i3---63] */
3839         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3840     }
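    /* Worked example (illustrative values): i3 = 40, i4 = 47 selects one
       byte, giving mask == 0x0000000000ff0000ull; a wrapped pair such as
       i3 = 48, i4 = 15 gives mask == 0xffff00000000ffffull. */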
3841     /* For RISBHG/RISBLG, the wrapping is limited to the high/low word. */
3842     mask &= pmask;
3843 
3844     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3845        insns, we need to keep the other half of the register.  */
3846     imask = ~mask | ~pmask;
3847     if (do_zero) {
3848         imask = ~pmask;
3849     }
3850 
3851     len = i4 - i3 + 1;
3852     pos = 63 - i4;
3853     rot = i5 & 63;
3854 
3855     /* In some cases we can implement this with extract.  */
3856     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3857         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3858         return DISAS_NEXT;
3859     }
3860 
3861     /* In some cases we can implement this with deposit.  */
3862     if (len > 0 && (imask == 0 || ~mask == imask)) {
3863         /* Note that we rotate the bits to be inserted to the lsb, not to
3864            the position as described in the PoO.  */
3865         rot = (rot - pos) & 63;
3866     } else {
3867         pos = -1;
3868     }
3869 
3870     /* Rotate the input as necessary.  */
3871     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3872 
3873     /* Insert the selected bits into the output.  */
3874     if (pos >= 0) {
3875         if (imask == 0) {
3876             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3877         } else {
3878             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3879         }
3880     } else if (imask == 0) {
3881         tcg_gen_andi_i64(o->out, o->in2, mask);
3882     } else {
3883         tcg_gen_andi_i64(o->in2, o->in2, mask);
3884         tcg_gen_andi_i64(o->out, o->out, imask);
3885         tcg_gen_or_i64(o->out, o->out, o->in2);
3886     }
3887     return DISAS_NEXT;
3888 }
3889 
3890 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3891 {
3892     int i3 = get_field(s, i3);
3893     int i4 = get_field(s, i4);
3894     int i5 = get_field(s, i5);
3895     uint64_t mask;
3896 
3897     /* If this is a test-only form, arrange to discard the result.  */
3898     if (i3 & 0x80) {
3899         o->out = tcg_temp_new_i64();
3900         o->g_out = false;
3901     }
3902 
3903     i3 &= 63;
3904     i4 &= 63;
3905     i5 &= 63;
3906 
3907     /* MASK is the set of bits to be operated on from R2.
3908        Take care for I3/I4 wraparound.  */
3909     mask = ~0ull >> i3;
3910     if (i3 <= i4) {
3911         mask ^= ~0ull >> i4 >> 1;
3912     } else {
3913         mask |= ~(~0ull >> i4 >> 1);
3914     }
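    /* E.g. i3 = 8, i4 = 15 gives mask == 0x00ff000000000000ull (bits 8-15
       in IBM numbering); a wrapped pair (i3 > i4) sets both ends of the
       register instead. */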
3915 
3916     /* Rotate the input as necessary.  */
3917     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3918 
3919     /* Operate.  */
3920     switch (s->fields.op2) {
3921     case 0x54: /* AND */
3922         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3923         tcg_gen_and_i64(o->out, o->out, o->in2);
3924         break;
3925     case 0x56: /* OR */
3926         tcg_gen_andi_i64(o->in2, o->in2, mask);
3927         tcg_gen_or_i64(o->out, o->out, o->in2);
3928         break;
3929     case 0x57: /* XOR */
3930         tcg_gen_andi_i64(o->in2, o->in2, mask);
3931         tcg_gen_xor_i64(o->out, o->out, o->in2);
3932         break;
3933     default:
3934         abort();
3935     }
3936 
3937     /* Set the CC.  */
3938     tcg_gen_andi_i64(cc_dst, o->out, mask);
3939     set_cc_nz_u64(s, cc_dst);
3940     return DISAS_NEXT;
3941 }
3942 
3943 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3944 {
3945     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3946     return DISAS_NEXT;
3947 }
3948 
3949 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3950 {
3951     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3952     return DISAS_NEXT;
3953 }
3954 
3955 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3956 {
3957     tcg_gen_bswap64_i64(o->out, o->in2);
3958     return DISAS_NEXT;
3959 }
3960 
3961 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3962 {
3963     TCGv_i32 t1 = tcg_temp_new_i32();
3964     TCGv_i32 t2 = tcg_temp_new_i32();
3965     TCGv_i32 to = tcg_temp_new_i32();
3966     tcg_gen_extrl_i64_i32(t1, o->in1);
3967     tcg_gen_extrl_i64_i32(t2, o->in2);
3968     tcg_gen_rotl_i32(to, t1, t2);
3969     tcg_gen_extu_i32_i64(o->out, to);
3970     tcg_temp_free_i32(t1);
3971     tcg_temp_free_i32(t2);
3972     tcg_temp_free_i32(to);
3973     return DISAS_NEXT;
3974 }
3975 
3976 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3977 {
3978     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3979     return DISAS_NEXT;
3980 }
3981 
3982 #ifndef CONFIG_USER_ONLY
3983 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3984 {
3985     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3986     set_cc_static(s);
3987     return DISAS_NEXT;
3988 }
3989 
3990 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3991 {
3992     gen_helper_sacf(cpu_env, o->in2);
3993     /* Addressing mode has changed, so end the block.  */
3994     return DISAS_TOO_MANY;
3995 }
3996 #endif
3997 
3998 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3999 {
4000     int sam = s->insn->data;
4001     TCGv_i64 tsam;
4002     uint64_t mask;
4003 
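    /* insn->data encodes the new mode: 0 -> 24-bit, 1 -> 31-bit, anything
       else -> 64-bit (full address mask). */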
4004     switch (sam) {
4005     case 0:
4006         mask = 0xffffff;
4007         break;
4008     case 1:
4009         mask = 0x7fffffff;
4010         break;
4011     default:
4012         mask = -1;
4013         break;
4014     }
4015 
4016     /* Bizarre but true: we check the address of the current insn for the
4017        specification exception, not the next to be executed.  Thus the PoO
4018        documents that Bad Things Happen two bytes before the end.  */
4019     if (s->base.pc_next & ~mask) {
4020         gen_program_exception(s, PGM_SPECIFICATION);
4021         return DISAS_NORETURN;
4022     }
4023     s->pc_tmp &= mask;
4024 
4025     tsam = tcg_const_i64(sam);
4026     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
4027     tcg_temp_free_i64(tsam);
4028 
4029     /* Always exit the TB, since we (may have) changed execution mode.  */
4030     return DISAS_TOO_MANY;
4031 }
4032 
4033 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
4034 {
4035     int r1 = get_field(s, r1);
4036     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
4037     return DISAS_NEXT;
4038 }
4039 
4040 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
4041 {
4042     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
4043     return DISAS_NEXT;
4044 }
4045 
4046 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
4047 {
4048     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
4049     return DISAS_NEXT;
4050 }
4051 
4052 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
4053 {
4054     gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
4055     return_low128(o->out2);
4056     return DISAS_NEXT;
4057 }
4058 
4059 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
4060 {
4061     gen_helper_sqeb(o->out, cpu_env, o->in2);
4062     return DISAS_NEXT;
4063 }
4064 
4065 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
4066 {
4067     gen_helper_sqdb(o->out, cpu_env, o->in2);
4068     return DISAS_NEXT;
4069 }
4070 
4071 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
4072 {
4073     gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
4074     return_low128(o->out2);
4075     return DISAS_NEXT;
4076 }
4077 
4078 #ifndef CONFIG_USER_ONLY
4079 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
4080 {
4081     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
4082     set_cc_static(s);
4083     return DISAS_NEXT;
4084 }
4085 
4086 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
4087 {
4088     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4089     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4090     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
4091     set_cc_static(s);
4092     tcg_temp_free_i32(r1);
4093     tcg_temp_free_i32(r3);
4094     return DISAS_NEXT;
4095 }
4096 #endif
4097 
4098 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
4099 {
4100     DisasCompare c;
4101     TCGv_i64 a, h;
4102     TCGLabel *lab;
4103     int r1;
4104 
4105     disas_jcc(s, &c, get_field(s, m3));
4106 
4107     /* We want to store when the condition is fulfilled, so branch
4108        out when it's not.  */
4109     c.cond = tcg_invert_cond(c.cond);
4110 
4111     lab = gen_new_label();
4112     if (c.is_64) {
4113         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
4114     } else {
4115         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
4116     }
4117     free_compare(&c);
4118 
4119     r1 = get_field(s, r1);
4120     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
4121     switch (s->insn->data) {
4122     case 1: /* STOCG */
4123         tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
4124         break;
4125     case 0: /* STOC */
4126         tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
4127         break;
4128     case 2: /* STOCFH */
4129         h = tcg_temp_new_i64();
4130         tcg_gen_shri_i64(h, regs[r1], 32);
4131         tcg_gen_qemu_st32(h, a, get_mem_index(s));
4132         tcg_temp_free_i64(h);
4133         break;
4134     default:
4135         g_assert_not_reached();
4136     }
4137     tcg_temp_free_i64(a);
4138 
4139     gen_set_label(lab);
4140     return DISAS_NEXT;
4141 }
4142 
4143 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
4144 {
4145     TCGv_i64 t;
4146     uint64_t sign = 1ull << s->insn->data;
4147     if (s->insn->data == 31) {
4148         t = tcg_temp_new_i64();
4149         tcg_gen_shli_i64(t, o->in1, 32);
4150     } else {
4151         t = o->in1;
4152     }
4153     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
4154     if (s->insn->data == 31) {
4155         tcg_temp_free_i64(t);
4156     }
4157     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4158     /* The arithmetic left shift is curious in that it does not affect
4159        the sign bit.  Copy that over from the source unchanged.  */
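    /* E.g. in the 32-bit form (data == 31), 0x40000000 << 1 becomes 0:
       the bit shifted into the sign position is discarded and the source
       sign bit (0 here) is copied back; CC_OP_SLA reports the overflow. */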
4160     tcg_gen_andi_i64(o->out, o->out, ~sign);
4161     tcg_gen_andi_i64(o->in1, o->in1, sign);
4162     tcg_gen_or_i64(o->out, o->out, o->in1);
4163     return DISAS_NEXT;
4164 }
4165 
4166 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
4167 {
4168     tcg_gen_shl_i64(o->out, o->in1, o->in2);
4169     return DISAS_NEXT;
4170 }
4171 
4172 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4173 {
4174     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4175     return DISAS_NEXT;
4176 }
4177 
4178 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4179 {
4180     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4181     return DISAS_NEXT;
4182 }
4183 
4184 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4185 {
4186     gen_helper_sfpc(cpu_env, o->in2);
4187     return DISAS_NEXT;
4188 }
4189 
4190 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4191 {
4192     gen_helper_sfas(cpu_env, o->in2);
4193     return DISAS_NEXT;
4194 }
4195 
4196 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4197 {
4198     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4199     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4200     gen_helper_srnm(cpu_env, o->addr1);
4201     return DISAS_NEXT;
4202 }
4203 
4204 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4205 {
4206     /* Bits 0-55 are ignored. */
4207     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4208     gen_helper_srnm(cpu_env, o->addr1);
4209     return DISAS_NEXT;
4210 }
4211 
4212 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4213 {
4214     TCGv_i64 tmp = tcg_temp_new_i64();
4215 
4216     /* Bits other than 61-63 are ignored. */
4217     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4218 
4219     /* No need to call a helper: we don't implement DFP. */
4220     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4221     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4222     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4223 
4224     tcg_temp_free_i64(tmp);
4225     return DISAS_NEXT;
4226 }
4227 
4228 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4229 {
4230     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4231     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4232     set_cc_static(s);
4233 
4234     tcg_gen_shri_i64(o->in1, o->in1, 24);
4235     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4236     return DISAS_NEXT;
4237 }
4238 
4239 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4240 {
4241     int b1 = get_field(s, b1);
4242     int d1 = get_field(s, d1);
4243     int b2 = get_field(s, b2);
4244     int d2 = get_field(s, d2);
4245     int r3 = get_field(s, r3);
4246     TCGv_i64 tmp = tcg_temp_new_i64();
4247 
4248     /* fetch all operands first */
4249     o->in1 = tcg_temp_new_i64();
4250     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4251     o->in2 = tcg_temp_new_i64();
4252     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4253     o->addr1 = tcg_temp_new_i64();
4254     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4255 
4256     /* load the third operand into r3 before modifying anything */
4257     tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
4258 
4259     /* subtract CPU timer from first operand and store in GR0 */
4260     gen_helper_stpt(tmp, cpu_env);
4261     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4262 
4263     /* store second operand in GR1 */
4264     tcg_gen_mov_i64(regs[1], o->in2);
4265 
4266     tcg_temp_free_i64(tmp);
4267     return DISAS_NEXT;
4268 }
4269 
4270 #ifndef CONFIG_USER_ONLY
4271 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4272 {
4273     tcg_gen_shri_i64(o->in2, o->in2, 4);
4274     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4275     return DISAS_NEXT;
4276 }
4277 
4278 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4279 {
4280     gen_helper_sske(cpu_env, o->in1, o->in2);
4281     return DISAS_NEXT;
4282 }
4283 
4284 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4285 {
4286     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4287     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4288     s->exit_to_mainloop = true;
4289     return DISAS_TOO_MANY;
4290 }
4291 
4292 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4293 {
4294     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4295     return DISAS_NEXT;
4296 }
4297 #endif
4298 
4299 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4300 {
4301     gen_helper_stck(o->out, cpu_env);
4302     /* ??? We don't implement clock states.  */
4303     gen_op_movi_cc(s, 0);
4304     return DISAS_NEXT;
4305 }
4306 
4307 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4308 {
4309     TCGv_i64 c1 = tcg_temp_new_i64();
4310     TCGv_i64 c2 = tcg_temp_new_i64();
4311     TCGv_i64 todpr = tcg_temp_new_i64();
4312     gen_helper_stck(c1, cpu_env);
4313     /* 16-bit value stored in a uint32_t (only valid bits set) */
4314     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4315     /* Shift the 64-bit value into its place as a zero-extended
4316        104-bit value.  Note that "bit positions 64-103 are always
4317        non-zero so that they compare differently to STCK"; we set
4318        the least significant bit to 1.  */
4319     tcg_gen_shli_i64(c2, c1, 56);
4320     tcg_gen_shri_i64(c1, c1, 8);
4321     tcg_gen_ori_i64(c2, c2, 0x10000);
4322     tcg_gen_or_i64(c2, c2, todpr);
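    /* The 128-bit extended value is now: 8 zero bits | 64-bit TOD |
       39 zero bits | 1 | 16-bit TOD programmable register. */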
4323     tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
4324     tcg_gen_addi_i64(o->in2, o->in2, 8);
4325     tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
4326     tcg_temp_free_i64(c1);
4327     tcg_temp_free_i64(c2);
4328     tcg_temp_free_i64(todpr);
4329     /* ??? We don't implement clock states.  */
4330     gen_op_movi_cc(s, 0);
4331     return DISAS_NEXT;
4332 }
4333 
4334 #ifndef CONFIG_USER_ONLY
4335 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4336 {
4337     gen_helper_sck(cc_op, cpu_env, o->in2);
4338     set_cc_static(s);
4339     return DISAS_NEXT;
4340 }
4341 
4342 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4343 {
4344     gen_helper_sckc(cpu_env, o->in2);
4345     return DISAS_NEXT;
4346 }
4347 
4348 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4349 {
4350     gen_helper_sckpf(cpu_env, regs[0]);
4351     return DISAS_NEXT;
4352 }
4353 
4354 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4355 {
4356     gen_helper_stckc(o->out, cpu_env);
4357     return DISAS_NEXT;
4358 }
4359 
4360 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4361 {
4362     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4363     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4364     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4365     tcg_temp_free_i32(r1);
4366     tcg_temp_free_i32(r3);
4367     return DISAS_NEXT;
4368 }
4369 
4370 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4371 {
4372     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4373     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4374     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4375     tcg_temp_free_i32(r1);
4376     tcg_temp_free_i32(r3);
4377     return DISAS_NEXT;
4378 }
4379 
4380 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4381 {
4382     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4383     return DISAS_NEXT;
4384 }
4385 
4386 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4387 {
4388     gen_helper_spt(cpu_env, o->in2);
4389     return DISAS_NEXT;
4390 }
4391 
4392 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4393 {
4394     gen_helper_stfl(cpu_env);
4395     return DISAS_NEXT;
4396 }
4397 
4398 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4399 {
4400     gen_helper_stpt(o->out, cpu_env);
4401     return DISAS_NEXT;
4402 }
4403 
4404 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4405 {
4406     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4407     set_cc_static(s);
4408     return DISAS_NEXT;
4409 }
4410 
4411 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4412 {
4413     gen_helper_spx(cpu_env, o->in2);
4414     return DISAS_NEXT;
4415 }
4416 
4417 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4418 {
4419     gen_helper_xsch(cpu_env, regs[1]);
4420     set_cc_static(s);
4421     return DISAS_NEXT;
4422 }
4423 
4424 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4425 {
4426     gen_helper_csch(cpu_env, regs[1]);
4427     set_cc_static(s);
4428     return DISAS_NEXT;
4429 }
4430 
4431 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4432 {
4433     gen_helper_hsch(cpu_env, regs[1]);
4434     set_cc_static(s);
4435     return DISAS_NEXT;
4436 }
4437 
4438 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4439 {
4440     gen_helper_msch(cpu_env, regs[1], o->in2);
4441     set_cc_static(s);
4442     return DISAS_NEXT;
4443 }
4444 
4445 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4446 {
4447     gen_helper_rchp(cpu_env, regs[1]);
4448     set_cc_static(s);
4449     return DISAS_NEXT;
4450 }
4451 
4452 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4453 {
4454     gen_helper_rsch(cpu_env, regs[1]);
4455     set_cc_static(s);
4456     return DISAS_NEXT;
4457 }
4458 
4459 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4460 {
4461     gen_helper_sal(cpu_env, regs[1]);
4462     return DISAS_NEXT;
4463 }
4464 
4465 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4466 {
4467     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4468     return DISAS_NEXT;
4469 }
4470 
4471 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4472 {
4473     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4474     gen_op_movi_cc(s, 3);
4475     return DISAS_NEXT;
4476 }
4477 
4478 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4479 {
4480     /* The instruction is suppressed if not provided. */
4481     return DISAS_NEXT;
4482 }
4483 
4484 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4485 {
4486     gen_helper_ssch(cpu_env, regs[1], o->in2);
4487     set_cc_static(s);
4488     return DISAS_NEXT;
4489 }
4490 
4491 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4492 {
4493     gen_helper_stsch(cpu_env, regs[1], o->in2);
4494     set_cc_static(s);
4495     return DISAS_NEXT;
4496 }
4497 
4498 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4499 {
4500     gen_helper_stcrw(cpu_env, o->in2);
4501     set_cc_static(s);
4502     return DISAS_NEXT;
4503 }
4504 
4505 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4506 {
4507     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4508     set_cc_static(s);
4509     return DISAS_NEXT;
4510 }
4511 
4512 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4513 {
4514     gen_helper_tsch(cpu_env, regs[1], o->in2);
4515     set_cc_static(s);
4516     return DISAS_NEXT;
4517 }
4518 
4519 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4520 {
4521     gen_helper_chsc(cpu_env, o->in2);
4522     set_cc_static(s);
4523     return DISAS_NEXT;
4524 }
4525 
4526 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4527 {
4528     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4529     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4530     return DISAS_NEXT;
4531 }
4532 
4533 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4534 {
4535     uint64_t i2 = get_field(s, i2);
4536     TCGv_i64 t;
4537 
4538     /* It is important to do what the instruction name says: STORE THEN.
4539        If we let the output hook perform the store and we then fault and
4540        restart, we'd have the wrong SYSTEM MASK in place.  */
4541     t = tcg_temp_new_i64();
4542     tcg_gen_shri_i64(t, psw_mask, 56);
4543     tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4544     tcg_temp_free_i64(t);
4545 
4546     if (s->fields.op == 0xac) {
4547         tcg_gen_andi_i64(psw_mask, psw_mask,
4548                          (i2 << 56) | 0x00ffffffffffffffull);
4549     } else {
4550         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4551     }
4552 
4553     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4554     s->exit_to_mainloop = true;
4555     return DISAS_TOO_MANY;
4556 }
4557 
4558 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4559 {
4560     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4561 
4562     if (s->base.tb->flags & FLAG_MASK_PER) {
4563         update_psw_addr(s);
4564         gen_helper_per_store_real(cpu_env);
4565     }
4566     return DISAS_NEXT;
4567 }
4568 #endif
4569 
4570 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4571 {
4572     gen_helper_stfle(cc_op, cpu_env, o->in2);
4573     set_cc_static(s);
4574     return DISAS_NEXT;
4575 }
4576 
4577 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4578 {
4579     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4580     return DISAS_NEXT;
4581 }
4582 
4583 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4584 {
4585     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4586     return DISAS_NEXT;
4587 }
4588 
4589 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4590 {
4591     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4592     return DISAS_NEXT;
4593 }
4594 
4595 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4596 {
4597     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4598     return DISAS_NEXT;
4599 }
4600 
4601 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4602 {
4603     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4604     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
4605     gen_helper_stam(cpu_env, r1, o->in2, r3);
4606     tcg_temp_free_i32(r1);
4607     tcg_temp_free_i32(r3);
4608     return DISAS_NEXT;
4609 }
4610 
4611 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4612 {
4613     int m3 = get_field(s, m3);
4614     int pos, base = s->insn->data;
4615     TCGv_i64 tmp = tcg_temp_new_i64();
4616 
4617     pos = base + ctz32(m3) * 8;
4618     switch (m3) {
4619     case 0xf:
4620         /* Effectively a 32-bit store.  */
4621         tcg_gen_shri_i64(tmp, o->in1, pos);
4622         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4623         break;
4624 
4625     case 0xc:
4626     case 0x6:
4627     case 0x3:
4628         /* Effectively a 16-bit store.  */
4629         tcg_gen_shri_i64(tmp, o->in1, pos);
4630         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4631         break;
4632 
4633     case 0x8:
4634     case 0x4:
4635     case 0x2:
4636     case 0x1:
4637         /* Effectively an 8-bit store.  */
4638         tcg_gen_shri_i64(tmp, o->in1, pos);
4639         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4640         break;
4641 
4642     default:
4643         /* This is going to be a sequence of shifts and stores.  */
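        /* E.g. m3 == 0xa selects bytes 0 and 2 of the 32-bit field; they
           are stored to consecutive addresses. */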
4644         pos = base + 32 - 8;
4645         while (m3) {
4646             if (m3 & 0x8) {
4647                 tcg_gen_shri_i64(tmp, o->in1, pos);
4648                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4649                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4650             }
4651             m3 = (m3 << 1) & 0xf;
4652             pos -= 8;
4653         }
4654         break;
4655     }
4656     tcg_temp_free_i64(tmp);
4657     return DISAS_NEXT;
4658 }
4659 
4660 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4661 {
4662     int r1 = get_field(s, r1);
4663     int r3 = get_field(s, r3);
4664     int size = s->insn->data;
4665     TCGv_i64 tsize = tcg_const_i64(size);
4666 
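    /* The register range wraps modulo 16: e.g. r1 == 14, r3 == 1 stores
       regs 14, 15, 0 and 1. */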
4667     while (1) {
4668         if (size == 8) {
4669             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4670         } else {
4671             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4672         }
4673         if (r1 == r3) {
4674             break;
4675         }
4676         tcg_gen_add_i64(o->in2, o->in2, tsize);
4677         r1 = (r1 + 1) & 15;
4678     }
4679 
4680     tcg_temp_free_i64(tsize);
4681     return DISAS_NEXT;
4682 }
4683 
4684 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4685 {
4686     int r1 = get_field(s, r1);
4687     int r3 = get_field(s, r3);
4688     TCGv_i64 t = tcg_temp_new_i64();
4689     TCGv_i64 t4 = tcg_const_i64(4);
4690     TCGv_i64 t32 = tcg_const_i64(32);
4691 
4692     while (1) {
4693         tcg_gen_shl_i64(t, regs[r1], t32);
4694         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4695         if (r1 == r3) {
4696             break;
4697         }
4698         tcg_gen_add_i64(o->in2, o->in2, t4);
4699         r1 = (r1 + 1) & 15;
4700     }
4701 
4702     tcg_temp_free_i64(t);
4703     tcg_temp_free_i64(t4);
4704     tcg_temp_free_i64(t32);
4705     return DISAS_NEXT;
4706 }
4707 
4708 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4709 {
4710     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4711         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4712     } else if (HAVE_ATOMIC128) {
4713         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4714     } else {
4715         gen_helper_exit_atomic(cpu_env);
4716         return DISAS_NORETURN;
4717     }
4718     return DISAS_NEXT;
4719 }
4720 
4721 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4722 {
4723     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4724     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4725 
4726     gen_helper_srst(cpu_env, r1, r2);
4727 
4728     tcg_temp_free_i32(r1);
4729     tcg_temp_free_i32(r2);
4730     set_cc_static(s);
4731     return DISAS_NEXT;
4732 }
4733 
4734 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4735 {
4736     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4737     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4738 
4739     gen_helper_srstu(cpu_env, r1, r2);
4740 
4741     tcg_temp_free_i32(r1);
4742     tcg_temp_free_i32(r2);
4743     set_cc_static(s);
4744     return DISAS_NEXT;
4745 }
4746 
4747 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4748 {
4749     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4750     return DISAS_NEXT;
4751 }
4752 
4753 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4754 {
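    /* The double-word subtract leaves the borrow in cc_src as 0 or -1,
       exactly the form CC_OP_SUBU expects. */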
4755     tcg_gen_movi_i64(cc_src, 0);
4756     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4757     return DISAS_NEXT;
4758 }
4759 
4760 /* Compute borrow (0, -1) into cc_src. */
4761 static void compute_borrow(DisasContext *s)
4762 {
4763     switch (s->cc_op) {
4764     case CC_OP_SUBU:
4765         /* The borrow value is already in cc_src (0,-1). */
4766         break;
4767     default:
4768         gen_op_calc_cc(s);
4769         /* fall through */
4770     case CC_OP_STATIC:
4771         /* The carry flag is the msb of CC; compute into cc_src. */
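        /* E.g. CC 2 or 3 (no borrow) has msb 1 -> carry 1 -> borrow 0;
           CC 0 or 1 has msb 0 -> carry 0 -> borrow -1 after the
           conversion below. */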
4772         tcg_gen_extu_i32_i64(cc_src, cc_op);
4773         tcg_gen_shri_i64(cc_src, cc_src, 1);
4774         /* fall through */
4775     case CC_OP_ADDU:
4776         /* Convert carry (1,0) to borrow (0,-1). */
4777         tcg_gen_subi_i64(cc_src, cc_src, 1);
4778         break;
4779     }
4780 }
4781 
4782 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4783 {
4784     compute_borrow(s);
4785 
4786     /* Borrow is {0, -1}, so add to subtract. */
4787     tcg_gen_add_i64(o->out, o->in1, cc_src);
4788     tcg_gen_sub_i64(o->out, o->out, o->in2);
4789     return DISAS_NEXT;
4790 }
4791 
4792 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4793 {
4794     compute_borrow(s);
4795 
4796     /*
4797      * Borrow is {0, -1}, so add to subtract; replicate the
4798      * borrow input to produce 128-bit -1 for the addition.
4799      */
4800     TCGv_i64 zero = tcg_const_i64(0);
4801     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4802     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4803     tcg_temp_free_i64(zero);
4804 
4805     return DISAS_NEXT;
4806 }
4807 
4808 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4809 {
4810     TCGv_i32 t;
4811 
4812     update_psw_addr(s);
4813     update_cc_op(s);
4814 
4815     t = tcg_const_i32(get_field(s, i1) & 0xff);
4816     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4817     tcg_temp_free_i32(t);
4818 
4819     t = tcg_const_i32(s->ilen);
4820     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4821     tcg_temp_free_i32(t);
4822 
4823     gen_exception(EXCP_SVC);
4824     return DISAS_NORETURN;
4825 }
4826 
4827 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4828 {
4829     int cc = 0;
4830 
4831     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4832     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4833     gen_op_movi_cc(s, cc);
4834     return DISAS_NEXT;
4835 }
4836 
4837 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4838 {
4839     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4840     set_cc_static(s);
4841     return DISAS_NEXT;
4842 }
4843 
4844 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4845 {
4846     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4847     set_cc_static(s);
4848     return DISAS_NEXT;
4849 }
4850 
4851 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4852 {
4853     gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4854     set_cc_static(s);
4855     return DISAS_NEXT;
4856 }
4857 
4858 #ifndef CONFIG_USER_ONLY
4859 
4860 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4861 {
4862     gen_helper_testblock(cc_op, cpu_env, o->in2);
4863     set_cc_static(s);
4864     return DISAS_NEXT;
4865 }
4866 
4867 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4868 {
4869     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4870     set_cc_static(s);
4871     return DISAS_NEXT;
4872 }
4873 
4874 #endif
4875 
4876 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4877 {
4878     TCGv_i32 l1 = tcg_const_i32(get_field(s, l1) + 1);
4879     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4880     tcg_temp_free_i32(l1);
4881     set_cc_static(s);
4882     return DISAS_NEXT;
4883 }
4884 
4885 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4886 {
4887     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4888     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4889     tcg_temp_free_i32(l);
4890     set_cc_static(s);
4891     return DISAS_NEXT;
4892 }
4893 
4894 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4895 {
4896     gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4897     return_low128(o->out2);
4898     set_cc_static(s);
4899     return DISAS_NEXT;
4900 }
4901 
4902 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4903 {
4904     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4905     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4906     tcg_temp_free_i32(l);
4907     set_cc_static(s);
4908     return DISAS_NEXT;
4909 }
4910 
4911 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4912 {
4913     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4914     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4915     tcg_temp_free_i32(l);
4916     set_cc_static(s);
4917     return DISAS_NEXT;
4918 }
4919 
4920 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4921 {
4922     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
4923     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
4924     TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4925     TCGv_i32 tst = tcg_temp_new_i32();
4926     int m3 = get_field(s, m3);
4927 
4928     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4929         m3 = 0;
4930     }
4931     if (m3 & 1) {
4932         tcg_gen_movi_i32(tst, -1);
4933     } else {
4934         tcg_gen_extrl_i64_i32(tst, regs[0]);
4935         if (s->insn->opc & 3) {
4936             tcg_gen_ext8u_i32(tst, tst);
4937         } else {
4938             tcg_gen_ext16u_i32(tst, tst);
4939         }
4940     }
4941     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4942 
4943     tcg_temp_free_i32(r1);
4944     tcg_temp_free_i32(r2);
4945     tcg_temp_free_i32(sizes);
4946     tcg_temp_free_i32(tst);
4947     set_cc_static(s);
4948     return DISAS_NEXT;
4949 }
4950 
4951 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4952 {
4953     TCGv_i32 t1 = tcg_const_i32(0xff);
4954     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4955     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4956     tcg_temp_free_i32(t1);
4957     set_cc_static(s);
4958     return DISAS_NEXT;
4959 }
4960 
4961 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4962 {
4963     TCGv_i32 l = tcg_const_i32(get_field(s, l1));
4964     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4965     tcg_temp_free_i32(l);
4966     return DISAS_NEXT;
4967 }
4968 
4969 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4970 {
4971     int l1 = get_field(s, l1) + 1;
4972     TCGv_i32 l;
4973 
4974     /* The length must not exceed 32 bytes.  */
4975     if (l1 > 32) {
4976         gen_program_exception(s, PGM_SPECIFICATION);
4977         return DISAS_NORETURN;
4978     }
4979     l = tcg_const_i32(l1);
4980     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4981     tcg_temp_free_i32(l);
4982     set_cc_static(s);
4983     return DISAS_NEXT;
4984 }
4985 
4986 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4987 {
4988     int l1 = get_field(s, l1) + 1;
4989     TCGv_i32 l;
4990 
4991     /* The length must be even and must not exceed 64 bytes.  */
4992     if ((l1 & 1) || (l1 > 64)) {
4993         gen_program_exception(s, PGM_SPECIFICATION);
4994         return DISAS_NORETURN;
4995     }
4996     l = tcg_const_i32(l1);
4997     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4998     tcg_temp_free_i32(l);
4999     set_cc_static(s);
5000     return DISAS_NEXT;
5001 }
5002 
5004 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
5005 {
5006     int d1 = get_field(s, d1);
5007     int d2 = get_field(s, d2);
5008     int b1 = get_field(s, b1);
5009     int b2 = get_field(s, b2);
5010     int l = get_field(s, l1);
5011     TCGv_i32 t32;
5012 
5013     o->addr1 = get_address(s, 0, b1, d1);
5014 
5015     /* If the addresses are identical, this is a store/memset of zero.  */
5016     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
5017         o->in2 = tcg_const_i64(0);
5018 
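        /* E.g. l == 10 encodes 11 bytes, emitted below as one 8-byte,
           one 2-byte and one 1-byte store. */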
5019         l++;
5020         while (l >= 8) {
5021             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
5022             l -= 8;
5023             if (l > 0) {
5024                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
5025             }
5026         }
5027         if (l >= 4) {
5028             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
5029             l -= 4;
5030             if (l > 0) {
5031                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
5032             }
5033         }
5034         if (l >= 2) {
5035             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
5036             l -= 2;
5037             if (l > 0) {
5038                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
5039             }
5040         }
5041         if (l) {
5042             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
5043         }
5044         gen_op_movi_cc(s, 0);
5045         return DISAS_NEXT;
5046     }
5047 
5048     /* But in general we'll defer to a helper.  */
5049     o->in2 = get_address(s, 0, b2, d2);
5050     t32 = tcg_const_i32(l);
5051     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
5052     tcg_temp_free_i32(t32);
5053     set_cc_static(s);
5054     return DISAS_NEXT;
5055 }
5056 
5057 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
5058 {
5059     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5060     return DISAS_NEXT;
5061 }
5062 
5063 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
5064 {
5065     int shift = s->insn->data & 0xff;
5066     int size = s->insn->data >> 8;
5067     uint64_t mask = ((1ull << size) - 1) << shift;
5068 
5069     assert(!o->g_in2);
5070     tcg_gen_shli_i64(o->in2, o->in2, shift);
5071     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5072 
5073     /* Produce the CC from only the bits manipulated.  */
5074     tcg_gen_andi_i64(cc_dst, o->out, mask);
5075     set_cc_nz_u64(s, cc_dst);
5076     return DISAS_NEXT;
5077 }
5078 
5079 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
5080 {
5081     o->in1 = tcg_temp_new_i64();
5082 
5083     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5084         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
5085     } else {
5086         /* Perform the atomic operation in memory. */
5087         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
5088                                      s->insn->data);
5089     }
5090 
5091     /* Recompute for the atomic case as well: needed for setting CC. */
5092     tcg_gen_xor_i64(o->out, o->in1, o->in2);
5093 
5094     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
5095         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
5096     }
5097     return DISAS_NEXT;
5098 }
5099 
5100 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
5101 {
5102     o->out = tcg_const_i64(0);
5103     return DISAS_NEXT;
5104 }
5105 
5106 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
5107 {
5108     o->out = tcg_const_i64(0);
5109     o->out2 = o->out;
5110     o->g_out2 = true;
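    /* out2 aliases out; flagging it as a global keeps the shared temp
       from being freed twice. */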
5111     return DISAS_NEXT;
5112 }
5113 
5114 #ifndef CONFIG_USER_ONLY
5115 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
5116 {
5117     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5118 
5119     gen_helper_clp(cpu_env, r2);
5120     tcg_temp_free_i32(r2);
5121     set_cc_static(s);
5122     return DISAS_NEXT;
5123 }
5124 
5125 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
5126 {
5127     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5128     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5129 
5130     gen_helper_pcilg(cpu_env, r1, r2);
5131     tcg_temp_free_i32(r1);
5132     tcg_temp_free_i32(r2);
5133     set_cc_static(s);
5134     return DISAS_NEXT;
5135 }
5136 
5137 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
5138 {
5139     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5140     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5141 
5142     gen_helper_pcistg(cpu_env, r1, r2);
5143     tcg_temp_free_i32(r1);
5144     tcg_temp_free_i32(r2);
5145     set_cc_static(s);
5146     return DISAS_NEXT;
5147 }
5148 
5149 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
5150 {
5151     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5152     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5153 
5154     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
5155     tcg_temp_free_i32(ar);
5156     tcg_temp_free_i32(r1);
5157     set_cc_static(s);
5158     return DISAS_NEXT;
5159 }
5160 
5161 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
5162 {
5163     gen_helper_sic(cpu_env, o->in1, o->in2);
5164     return DISAS_NEXT;
5165 }
5166 
5167 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
5168 {
5169     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5170     TCGv_i32 r2 = tcg_const_i32(get_field(s, r2));
5171 
5172     gen_helper_rpcit(cpu_env, r1, r2);
5173     tcg_temp_free_i32(r1);
5174     tcg_temp_free_i32(r2);
5175     set_cc_static(s);
5176     return DISAS_NEXT;
5177 }
5178 
5179 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
5180 {
5181     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5182     TCGv_i32 r3 = tcg_const_i32(get_field(s, r3));
5183     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5184 
5185     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
5186     tcg_temp_free_i32(ar);
5187     tcg_temp_free_i32(r1);
5188     tcg_temp_free_i32(r3);
5189     set_cc_static(s);
5190     return DISAS_NEXT;
5191 }
5192 
5193 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
5194 {
5195     TCGv_i32 r1 = tcg_const_i32(get_field(s, r1));
5196     TCGv_i32 ar = tcg_const_i32(get_field(s, b2));
5197 
5198     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
5199     tcg_temp_free_i32(ar);
5200     tcg_temp_free_i32(r1);
5201     set_cc_static(s);
5202     return DISAS_NEXT;
5203 }
5204 #endif
5205 
5206 #include "translate_vx.c.inc"
5207 
5208 /* ====================================================================== */
5209 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5210    the original inputs), update the various cc data structures in order to
5211    be able to compute the new condition code.  */
5212 
5213 static void cout_abs32(DisasContext *s, DisasOps *o)
5214 {
5215     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5216 }
5217 
5218 static void cout_abs64(DisasContext *s, DisasOps *o)
5219 {
5220     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5221 }
5222 
5223 static void cout_adds32(DisasContext *s, DisasOps *o)
5224 {
5225     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5226 }
5227 
5228 static void cout_adds64(DisasContext *s, DisasOps *o)
5229 {
5230     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5231 }
5232 
5233 static void cout_addu32(DisasContext *s, DisasOps *o)
5234 {
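    /* The 32-bit addition was performed in 64 bits, so bit 32 of o->out
       is the carry-out; split carry and result for CC_OP_ADDU. */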
5235     tcg_gen_shri_i64(cc_src, o->out, 32);
5236     tcg_gen_ext32u_i64(cc_dst, o->out);
5237     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5238 }
5239 
5240 static void cout_addu64(DisasContext *s, DisasOps *o)
5241 {
5242     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5243 }
5244 
5245 static void cout_cmps32(DisasContext *s, DisasOps *o)
5246 {
5247     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5248 }
5249 
5250 static void cout_cmps64(DisasContext *s, DisasOps *o)
5251 {
5252     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5253 }
5254 
5255 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5256 {
5257     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5258 }
5259 
5260 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5261 {
5262     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5263 }
5264 
5265 static void cout_f32(DisasContext *s, DisasOps *o)
5266 {
5267     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5268 }
5269 
5270 static void cout_f64(DisasContext *s, DisasOps *o)
5271 {
5272     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5273 }
5274 
5275 static void cout_f128(DisasContext *s, DisasOps *o)
5276 {
5277     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5278 }
5279 
5280 static void cout_nabs32(DisasContext *s, DisasOps *o)
5281 {
5282     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5283 }
5284 
5285 static void cout_nabs64(DisasContext *s, DisasOps *o)
5286 {
5287     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5288 }
5289 
5290 static void cout_neg32(DisasContext *s, DisasOps *o)
5291 {
5292     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5293 }
5294 
5295 static void cout_neg64(DisasContext *s, DisasOps *o)
5296 {
5297     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5298 }
5299 
5300 static void cout_nz32(DisasContext *s, DisasOps *o)
5301 {
5302     tcg_gen_ext32u_i64(cc_dst, o->out);
5303     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5304 }
5305 
5306 static void cout_nz64(DisasContext *s, DisasOps *o)
5307 {
5308     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5309 }
5310 
5311 static void cout_s32(DisasContext *s, DisasOps *o)
5312 {
5313     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5314 }
5315 
5316 static void cout_s64(DisasContext *s, DisasOps *o)
5317 {
5318     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5319 }
5320 
5321 static void cout_subs32(DisasContext *s, DisasOps *o)
5322 {
5323     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5324 }
5325 
5326 static void cout_subs64(DisasContext *s, DisasOps *o)
5327 {
5328     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5329 }
5330 
5331 static void cout_subu32(DisasContext *s, DisasOps *o)
5332 {
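    /* The 32-bit subtraction was performed in 64 bits: the high half of
       o->out is the sign-extended borrow, so the arithmetic shift
       produces 0 or -1. */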
5333     tcg_gen_sari_i64(cc_src, o->out, 32);
5334     tcg_gen_ext32u_i64(cc_dst, o->out);
5335     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5336 }
5337 
5338 static void cout_subu64(DisasContext *s, DisasOps *o)
5339 {
5340     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5341 }
5342 
5343 static void cout_tm32(DisasContext *s, DisasOps *o)
5344 {
5345     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5346 }
5347 
5348 static void cout_tm64(DisasContext *s, DisasOps *o)
5349 {
5350     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5351 }
5352 
5353 static void cout_muls32(DisasContext *s, DisasOps *o)
5354 {
5355     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5356 }
5357 
5358 static void cout_muls64(DisasContext *s, DisasOps *o)
5359 {
5360     /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5361     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5362 }
5363 
5364 /* ====================================================================== */
5365 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5366    with the TCG register to which we will write.  Used in combination with
5367    the "wout" generators, in some cases we need a new temporary, and in
5368    some cases we can write to a TCG global.  */
5369 
5370 static void prep_new(DisasContext *s, DisasOps *o)
5371 {
5372     o->out = tcg_temp_new_i64();
5373 }
5374 #define SPEC_prep_new 0
5375 
5376 static void prep_new_P(DisasContext *s, DisasOps *o)
5377 {
5378     o->out = tcg_temp_new_i64();
5379     o->out2 = tcg_temp_new_i64();
5380 }
5381 #define SPEC_prep_new_P 0
5382 
5383 static void prep_r1(DisasContext *s, DisasOps *o)
5384 {
5385     o->out = regs[get_field(s, r1)];
5386     o->g_out = true;
5387 }
5388 #define SPEC_prep_r1 0
5389 
5390 static void prep_r1_P(DisasContext *s, DisasOps *o)
5391 {
5392     int r1 = get_field(s, r1);
5393     o->out = regs[r1];
5394     o->out2 = regs[r1 + 1];
5395     o->g_out = o->g_out2 = true;
5396 }
5397 #define SPEC_prep_r1_P SPEC_r1_even
5398 
5399 /* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
5400 static void prep_x1(DisasContext *s, DisasOps *o)
5401 {
5402     o->out = load_freg(get_field(s, r1));
5403     o->out2 = load_freg(get_field(s, r1) + 2);
5404 }
5405 #define SPEC_prep_x1 SPEC_r1_f128
5406 
5407 /* ====================================================================== */
5408 /* The "Write OUTput" generators.  These generally perform some non-trivial
5409    copy of data to TCG globals, or to main memory.  The trivial cases are
5410    generally handled by having a "prep" generator install the TCG global
5411    as the destination of the operation.  */
5412 
5413 static void wout_r1(DisasContext *s, DisasOps *o)
5414 {
5415     store_reg(get_field(s, r1), o->out);
5416 }
5417 #define SPEC_wout_r1 0
5418 
5419 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5420 {
5421     store_reg(get_field(s, r1), o->out2);
5422 }
5423 #define SPEC_wout_out2_r1 0
5424 
5425 static void wout_r1_8(DisasContext *s, DisasOps *o)
5426 {
5427     int r1 = get_field(s, r1);
5428     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5429 }
5430 #define SPEC_wout_r1_8 0
5431 
5432 static void wout_r1_16(DisasContext *s, DisasOps *o)
5433 {
5434     int r1 = get_field(s, r1);
5435     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5436 }
5437 #define SPEC_wout_r1_16 0
5438 
5439 static void wout_r1_32(DisasContext *s, DisasOps *o)
5440 {
5441     store_reg32_i64(get_field(s, r1), o->out);
5442 }
5443 #define SPEC_wout_r1_32 0
5444 
5445 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5446 {
5447     store_reg32h_i64(get_field(s, r1), o->out);
5448 }
5449 #define SPEC_wout_r1_32h 0
5450 
5451 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5452 {
5453     int r1 = get_field(s, r1);
5454     store_reg32_i64(r1, o->out);
5455     store_reg32_i64(r1 + 1, o->out2);
5456 }
5457 #define SPEC_wout_r1_P32 SPEC_r1_even
5458 
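     /* Split a doubleword into an even/odd register pair.  For example,
        with r1 == 2 the high 32 bits of o->out go to the low half of r2
        and the low 32 bits to the low half of r3. */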
5459 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5460 {
5461     int r1 = get_field(s, r1);
5462     TCGv_i64 t = tcg_temp_new_i64();
5463     store_reg32_i64(r1 + 1, o->out);
5464     tcg_gen_shri_i64(t, o->out, 32);
5465     store_reg32_i64(r1, t);
5466     tcg_temp_free_i64(t);
5467 }
5468 #define SPEC_wout_r1_D32 SPEC_r1_even
5469 
5470 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5471 {
5472     int r3 = get_field(s, r3);
5473     store_reg32_i64(r3, o->out);
5474     store_reg32_i64(r3 + 1, o->out2);
5475 }
5476 #define SPEC_wout_r3_P32 SPEC_r3_even
5477 
5478 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5479 {
5480     int r3 = get_field(s, r3);
5481     store_reg(r3, o->out);
5482     store_reg(r3 + 1, o->out2);
5483 }
5484 #define SPEC_wout_r3_P64 SPEC_r3_even
5485 
5486 static void wout_e1(DisasContext *s, DisasOps *o)
5487 {
5488     store_freg32_i64(get_field(s, r1), o->out);
5489 }
5490 #define SPEC_wout_e1 0
5491 
5492 static void wout_f1(DisasContext *s, DisasOps *o)
5493 {
5494     store_freg(get_field(s, r1), o->out);
5495 }
5496 #define SPEC_wout_f1 0
5497 
5498 static void wout_x1(DisasContext *s, DisasOps *o)
5499 {
5500     int f1 = get_field(s, r1);
5501     store_freg(f1, o->out);
5502     store_freg(f1 + 2, o->out2);
5503 }
5504 #define SPEC_wout_x1 SPEC_r1_f128
5505 
5506 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5507 {
5508     if (get_field(s, r1) != get_field(s, r2)) {
5509         store_reg32_i64(get_field(s, r1), o->out);
5510     }
5511 }
5512 #define SPEC_wout_cond_r1r2_32 0
5513 
5514 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5515 {
5516     if (get_field(s, r1) != get_field(s, r2)) {
5517         store_freg32_i64(get_field(s, r1), o->out);
5518     }
5519 }
5520 #define SPEC_wout_cond_e1e2 0
5521 
5522 static void wout_m1_8(DisasContext *s, DisasOps *o)
5523 {
5524     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
5525 }
5526 #define SPEC_wout_m1_8 0
5527 
5528 static void wout_m1_16(DisasContext *s, DisasOps *o)
5529 {
5530     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
5531 }
5532 #define SPEC_wout_m1_16 0
5533 
5534 #ifndef CONFIG_USER_ONLY
5535 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5536 {
5537     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5538 }
5539 #define SPEC_wout_m1_16a 0
5540 #endif
5541 
5542 static void wout_m1_32(DisasContext *s, DisasOps *o)
5543 {
5544     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
5545 }
5546 #define SPEC_wout_m1_32 0
5547 
5548 #ifndef CONFIG_USER_ONLY
5549 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5550 {
5551     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5552 }
5553 #define SPEC_wout_m1_32a 0
5554 #endif
5555 
5556 static void wout_m1_64(DisasContext *s, DisasOps *o)
5557 {
5558     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
5559 }
5560 #define SPEC_wout_m1_64 0
5561 
5562 #ifndef CONFIG_USER_ONLY
5563 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5564 {
5565     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5566 }
5567 #define SPEC_wout_m1_64a 0
5568 #endif
5569 
5570 static void wout_m2_32(DisasContext *s, DisasOps *o)
5571 {
5572     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
5573 }
5574 #define SPEC_wout_m2_32 0
5575 
5576 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5577 {
5578     store_reg(get_field(s, r1), o->in2);
5579 }
5580 #define SPEC_wout_in2_r1 0
5581 
5582 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5583 {
5584     store_reg32_i64(get_field(s, r1), o->in2);
5585 }
5586 #define SPEC_wout_in2_r1_32 0
5587 
5588 /* ====================================================================== */
5589 /* The "INput 1" generators.  These load the first operand to an insn.  */
5590 
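     /*
      * A note on naming, summarizing the bodies below: plain rN loads the
      * general register named by field rN; the 32s and 32u suffixes select
      * sign or zero extension; sr32 takes the high word; and an _o suffix
      * aliases the TCG global in place, setting g_in1 so that it is not
      * freed afterwards.
      */
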
5591 static void in1_r1(DisasContext *s, DisasOps *o)
5592 {
5593     o->in1 = load_reg(get_field(s, r1));
5594 }
5595 #define SPEC_in1_r1 0
5596 
5597 static void in1_r1_o(DisasContext *s, DisasOps *o)
5598 {
5599     o->in1 = regs[get_field(s, r1)];
5600     o->g_in1 = true;
5601 }
5602 #define SPEC_in1_r1_o 0
5603 
5604 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5605 {
5606     o->in1 = tcg_temp_new_i64();
5607     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5608 }
5609 #define SPEC_in1_r1_32s 0
5610 
5611 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5612 {
5613     o->in1 = tcg_temp_new_i64();
5614     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5615 }
5616 #define SPEC_in1_r1_32u 0
5617 
5618 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5619 {
5620     o->in1 = tcg_temp_new_i64();
5621     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5622 }
5623 #define SPEC_in1_r1_sr32 0
5624 
5625 static void in1_r1p1(DisasContext *s, DisasOps *o)
5626 {
5627     o->in1 = load_reg(get_field(s, r1) + 1);
5628 }
5629 #define SPEC_in1_r1p1 SPEC_r1_even
5630 
5631 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5632 {
5633     o->in1 = regs[get_field(s, r1) + 1];
5634     o->g_in1 = true;
5635 }
5636 #define SPEC_in1_r1p1_o SPEC_r1_even
5637 
5638 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5639 {
5640     o->in1 = tcg_temp_new_i64();
5641     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5642 }
5643 #define SPEC_in1_r1p1_32s SPEC_r1_even
5644 
5645 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5646 {
5647     o->in1 = tcg_temp_new_i64();
5648     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5649 }
5650 #define SPEC_in1_r1p1_32u SPEC_r1_even
5651 
5652 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5653 {
5654     int r1 = get_field(s, r1);
5655     o->in1 = tcg_temp_new_i64();
5656     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5657 }
5658 #define SPEC_in1_r1_D32 SPEC_r1_even
5659 
5660 static void in1_r2(DisasContext *s, DisasOps *o)
5661 {
5662     o->in1 = load_reg(get_field(s, r2));
5663 }
5664 #define SPEC_in1_r2 0
5665 
5666 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5667 {
5668     o->in1 = tcg_temp_new_i64();
5669     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5670 }
5671 #define SPEC_in1_r2_sr32 0
5672 
5673 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5674 {
5675     o->in1 = tcg_temp_new_i64();
5676     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5677 }
5678 #define SPEC_in1_r2_32u 0
5679 
5680 static void in1_r3(DisasContext *s, DisasOps *o)
5681 {
5682     o->in1 = load_reg(get_field(s, r3));
5683 }
5684 #define SPEC_in1_r3 0
5685 
5686 static void in1_r3_o(DisasContext *s, DisasOps *o)
5687 {
5688     o->in1 = regs[get_field(s, r3)];
5689     o->g_in1 = true;
5690 }
5691 #define SPEC_in1_r3_o 0
5692 
5693 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5694 {
5695     o->in1 = tcg_temp_new_i64();
5696     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5697 }
5698 #define SPEC_in1_r3_32s 0
5699 
5700 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5701 {
5702     o->in1 = tcg_temp_new_i64();
5703     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5704 }
5705 #define SPEC_in1_r3_32u 0
5706 
5707 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5708 {
5709     int r3 = get_field(s, r3);
5710     o->in1 = tcg_temp_new_i64();
5711     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5712 }
5713 #define SPEC_in1_r3_D32 SPEC_r3_even
5714 
5715 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5716 {
5717     o->in1 = tcg_temp_new_i64();
5718     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5719 }
5720 #define SPEC_in1_r3_sr32 0
5721 
5722 static void in1_e1(DisasContext *s, DisasOps *o)
5723 {
5724     o->in1 = load_freg32_i64(get_field(s, r1));
5725 }
5726 #define SPEC_in1_e1 0
5727 
5728 static void in1_f1(DisasContext *s, DisasOps *o)
5729 {
5730     o->in1 = load_freg(get_field(s, r1));
5731 }
5732 #define SPEC_in1_f1 0
5733 
5734 /* Load the high double word of an extended (128-bit) format FP number */
5735 static void in1_x2h(DisasContext *s, DisasOps *o)
5736 {
5737     o->in1 = load_freg(get_field(s, r2));
5738 }
5739 #define SPEC_in1_x2h SPEC_r2_f128
5740 
5741 static void in1_f3(DisasContext *s, DisasOps *o)
5742 {
5743     o->in1 = load_freg(get_field(s, r3));
5744 }
5745 #define SPEC_in1_f3 0
5746 
5747 static void in1_la1(DisasContext *s, DisasOps *o)
5748 {
5749     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5750 }
5751 #define SPEC_in1_la1 0
5752 
5753 static void in1_la2(DisasContext *s, DisasOps *o)
5754 {
5755     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5756     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5757 }
5758 #define SPEC_in1_la2 0
5759 
5760 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5761 {
5762     in1_la1(s, o);
5763     o->in1 = tcg_temp_new_i64();
5764     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5765 }
5766 #define SPEC_in1_m1_8u 0
5767 
5768 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5769 {
5770     in1_la1(s, o);
5771     o->in1 = tcg_temp_new_i64();
5772     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5773 }
5774 #define SPEC_in1_m1_16s 0
5775 
5776 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5777 {
5778     in1_la1(s, o);
5779     o->in1 = tcg_temp_new_i64();
5780     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5781 }
5782 #define SPEC_in1_m1_16u 0
5783 
5784 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5785 {
5786     in1_la1(s, o);
5787     o->in1 = tcg_temp_new_i64();
5788     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5789 }
5790 #define SPEC_in1_m1_32s 0
5791 
5792 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5793 {
5794     in1_la1(s, o);
5795     o->in1 = tcg_temp_new_i64();
5796     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5797 }
5798 #define SPEC_in1_m1_32u 0
5799 
5800 static void in1_m1_64(DisasContext *s, DisasOps *o)
5801 {
5802     in1_la1(s, o);
5803     o->in1 = tcg_temp_new_i64();
5804     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5805 }
5806 #define SPEC_in1_m1_64 0
5807 
5808 /* ====================================================================== */
5809 /* The "INput 2" generators.  These load the second operand to an insn.  */
5810 
5811 static void in2_r1_o(DisasContext *s, DisasOps *o)
5812 {
5813     o->in2 = regs[get_field(s, r1)];
5814     o->g_in2 = true;
5815 }
5816 #define SPEC_in2_r1_o 0
5817 
5818 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5819 {
5820     o->in2 = tcg_temp_new_i64();
5821     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5822 }
5823 #define SPEC_in2_r1_16u 0
5824 
5825 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5826 {
5827     o->in2 = tcg_temp_new_i64();
5828     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5829 }
5830 #define SPEC_in2_r1_32u 0
5831 
5832 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5833 {
5834     int r1 = get_field(s, r1);
5835     o->in2 = tcg_temp_new_i64();
5836     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5837 }
5838 #define SPEC_in2_r1_D32 SPEC_r1_even
5839 
5840 static void in2_r2(DisasContext *s, DisasOps *o)
5841 {
5842     o->in2 = load_reg(get_field(s, r2));
5843 }
5844 #define SPEC_in2_r2 0
5845 
5846 static void in2_r2_o(DisasContext *s, DisasOps *o)
5847 {
5848     o->in2 = regs[get_field(s, r2)];
5849     o->g_in2 = true;
5850 }
5851 #define SPEC_in2_r2_o 0
5852 
5853 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5854 {
5855     int r2 = get_field(s, r2);
5856     if (r2 != 0) {
5857         o->in2 = load_reg(r2);
5858     }
5859 }
5860 #define SPEC_in2_r2_nz 0
5861 
5862 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5863 {
5864     o->in2 = tcg_temp_new_i64();
5865     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5866 }
5867 #define SPEC_in2_r2_8s 0
5868 
5869 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5870 {
5871     o->in2 = tcg_temp_new_i64();
5872     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5873 }
5874 #define SPEC_in2_r2_8u 0
5875 
5876 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5877 {
5878     o->in2 = tcg_temp_new_i64();
5879     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5880 }
5881 #define SPEC_in2_r2_16s 0
5882 
5883 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5884 {
5885     o->in2 = tcg_temp_new_i64();
5886     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5887 }
5888 #define SPEC_in2_r2_16u 0
5889 
5890 static void in2_r3(DisasContext *s, DisasOps *o)
5891 {
5892     o->in2 = load_reg(get_field(s, r3));
5893 }
5894 #define SPEC_in2_r3 0
5895 
5896 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5897 {
5898     o->in2 = tcg_temp_new_i64();
5899     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5900 }
5901 #define SPEC_in2_r3_sr32 0
5902 
5903 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5904 {
5905     o->in2 = tcg_temp_new_i64();
5906     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5907 }
5908 #define SPEC_in2_r3_32u 0
5909 
5910 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5911 {
5912     o->in2 = tcg_temp_new_i64();
5913     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5914 }
5915 #define SPEC_in2_r2_32s 0
5916 
5917 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5918 {
5919     o->in2 = tcg_temp_new_i64();
5920     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5921 }
5922 #define SPEC_in2_r2_32u 0
5923 
5924 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5925 {
5926     o->in2 = tcg_temp_new_i64();
5927     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5928 }
5929 #define SPEC_in2_r2_sr32 0
5930 
5931 static void in2_e2(DisasContext *s, DisasOps *o)
5932 {
5933     o->in2 = load_freg32_i64(get_field(s, r2));
5934 }
5935 #define SPEC_in2_e2 0
5936 
5937 static void in2_f2(DisasContext *s, DisasOps *o)
5938 {
5939     o->in2 = load_freg(get_field(s, r2));
5940 }
5941 #define SPEC_in2_f2 0
5942 
5943 /* Load the low double word of an extended (128-bit) format FP number */
5944 static void in2_x2l(DisasContext *s, DisasOps *o)
5945 {
5946     o->in2 = load_freg(get_field(s, r2) + 2);
5947 }
5948 #define SPEC_in2_x2l SPEC_r2_f128
5949 
5950 static void in2_ra2(DisasContext *s, DisasOps *o)
5951 {
5952     int r2 = get_field(s, r2);
5953 
5954     /* Note: *don't* treat !r2 as 0, use the reg value. */
5955     o->in2 = tcg_temp_new_i64();
5956     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5957 }
5958 #define SPEC_in2_ra2 0
5959 
5960 static void in2_a2(DisasContext *s, DisasOps *o)
5961 {
5962     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5963     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5964 }
5965 #define SPEC_in2_a2 0
5966 
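     /* A PC-relative address: i2 is a signed halfword offset from this insn. */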
5967 static void in2_ri2(DisasContext *s, DisasOps *o)
5968 {
5969     o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(s, i2) * 2);
5970 }
5971 #define SPEC_in2_ri2 0
5972 
5973 static void in2_sh(DisasContext *s, DisasOps *o)
5974 {
5975     int b2 = get_field(s, b2);
5976     int d2 = get_field(s, d2);
5977 
5978     if (b2 == 0) {
5979         o->in2 = tcg_const_i64(d2 & 0x3f);
5980     } else {
5981         o->in2 = get_address(s, 0, b2, d2);
5982         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5983     }
5984 }
5985 #define SPEC_in2_sh 0
5986 
5987 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5988 {
5989     in2_a2(s, o);
5990     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5991 }
5992 #define SPEC_in2_m2_8u 0
5993 
5994 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5995 {
5996     in2_a2(s, o);
5997     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5998 }
5999 #define SPEC_in2_m2_16s 0
6000 
6001 static void in2_m2_16u(DisasContext *s, DisasOps *o)
6002 {
6003     in2_a2(s, o);
6004     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6005 }
6006 #define SPEC_in2_m2_16u 0
6007 
6008 static void in2_m2_32s(DisasContext *s, DisasOps *o)
6009 {
6010     in2_a2(s, o);
6011     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6012 }
6013 #define SPEC_in2_m2_32s 0
6014 
6015 static void in2_m2_32u(DisasContext *s, DisasOps *o)
6016 {
6017     in2_a2(s, o);
6018     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6019 }
6020 #define SPEC_in2_m2_32u 0
6021 
6022 #ifndef CONFIG_USER_ONLY
6023 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
6024 {
6025     in2_a2(s, o);
6026     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
6027 }
6028 #define SPEC_in2_m2_32ua 0
6029 #endif
6030 
6031 static void in2_m2_64(DisasContext *s, DisasOps *o)
6032 {
6033     in2_a2(s, o);
6034     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6035 }
6036 #define SPEC_in2_m2_64 0
6037 
6038 static void in2_m2_64w(DisasContext *s, DisasOps *o)
6039 {
6040     in2_a2(s, o);
6041     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
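         /* The "w" suffix: wrap the loaded value to the current addressing mode. */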
6042     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
6043 }
6044 #define SPEC_in2_m2_64w 0
6045 
6046 #ifndef CONFIG_USER_ONLY
6047 static void in2_m2_64a(DisasContext *s, DisasOps *o)
6048 {
6049     in2_a2(s, o);
6050     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
6051 }
6052 #define SPEC_in2_m2_64a 0
6053 #endif
6054 
6055 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
6056 {
6057     in2_ri2(s, o);
6058     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
6059 }
6060 #define SPEC_in2_mri2_16u 0
6061 
6062 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
6063 {
6064     in2_ri2(s, o);
6065     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
6066 }
6067 #define SPEC_in2_mri2_32s 0
6068 
6069 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
6070 {
6071     in2_ri2(s, o);
6072     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
6073 }
6074 #define SPEC_in2_mri2_32u 0
6075 
6076 static void in2_mri2_64(DisasContext *s, DisasOps *o)
6077 {
6078     in2_ri2(s, o);
6079     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
6080 }
6081 #define SPEC_in2_mri2_64 0
6082 
6083 static void in2_i2(DisasContext *s, DisasOps *o)
6084 {
6085     o->in2 = tcg_const_i64(get_field(s, i2));
6086 }
6087 #define SPEC_in2_i2 0
6088 
6089 static void in2_i2_8u(DisasContext *s, DisasOps *o)
6090 {
6091     o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
6092 }
6093 #define SPEC_in2_i2_8u 0
6094 
6095 static void in2_i2_16u(DisasContext *s, DisasOps *o)
6096 {
6097     o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
6098 }
6099 #define SPEC_in2_i2_16u 0
6100 
6101 static void in2_i2_32u(DisasContext *s, DisasOps *o)
6102 {
6103     o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
6104 }
6105 #define SPEC_in2_i2_32u 0
6106 
6107 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
6108 {
6109     uint64_t i2 = (uint16_t)get_field(s, i2);
6110     o->in2 = tcg_const_i64(i2 << s->insn->data);
6111 }
6112 #define SPEC_in2_i2_16u_shl 0
6113 
6114 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
6115 {
6116     uint64_t i2 = (uint32_t)get_field(s, i2);
6117     o->in2 = tcg_const_i64(i2 << s->insn->data);
6118 }
6119 #define SPEC_in2_i2_32u_shl 0
6120 
6121 #ifndef CONFIG_USER_ONLY
6122 static void in2_insn(DisasContext *s, DisasOps *o)
6123 {
6124     o->in2 = tcg_const_i64(s->fields.raw_insn);
6125 }
6126 #define SPEC_in2_insn 0
6127 #endif
6128 
6129 /* ====================================================================== */
6130 
6131 /* Find opc within the table of insns.  This is formulated as a switch
6132    statement so that (1) we get compile-time notice of cut-paste errors
6133    for duplicated opcodes, and (2) the compiler generates the binary
6134    search tree, rather than us having to post-process the table.  */
6135 
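     /*
      * A sketch of the machinery: insn-data.h.inc is included three times
      * below with different definitions of E().  Given an entry along the
      * lines of (shown here purely for illustration)
      *
      *     C(0x1a00, AR, RR_a, Z, r1, r2_32s, new, r1_32, add, adds32)
      *
      * the first inclusion yields the enum constant insn_AR, the second a
      * DisasInsn initializer in insn_info[], and the third the statement
      * "case 0x1a00: return &insn_info[insn_AR];" in lookup_opc below.
      */
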
6136 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
6137     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
6138 
6139 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
6140     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
6141 
6142 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6143     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6144 
6145 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6146 
6147 enum DisasInsnEnum {
6148 #include "insn-data.h.inc"
6149 };
6150 
6151 #undef E
6152 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6153     .opc = OPC,                                                             \
6154     .flags = FL,                                                            \
6155     .fmt = FMT_##FT,                                                        \
6156     .fac = FAC_##FC,                                                        \
6157     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6158     .name = #NM,                                                            \
6159     .help_in1 = in1_##I1,                                                   \
6160     .help_in2 = in2_##I2,                                                   \
6161     .help_prep = prep_##P,                                                  \
6162     .help_wout = wout_##W,                                                  \
6163     .help_cout = cout_##CC,                                                 \
6164     .help_op = op_##OP,                                                     \
6165     .data = D                                                               \
6166  },
6167 
6168 /* Allow 0 to be used for NULL in the table below.  */
6169 #define in1_0  NULL
6170 #define in2_0  NULL
6171 #define prep_0  NULL
6172 #define wout_0  NULL
6173 #define cout_0  NULL
6174 #define op_0  NULL
6175 
6176 #define SPEC_in1_0 0
6177 #define SPEC_in2_0 0
6178 #define SPEC_prep_0 0
6179 #define SPEC_wout_0 0
6180 
6181 /* Give smaller names to the various facilities.  */
6182 #define FAC_Z           S390_FEAT_ZARCH
6183 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6184 #define FAC_DFP         S390_FEAT_DFP
6185 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6186 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6187 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6188 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6189 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6190 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6191 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6192 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6193 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6194 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6195 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6196 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6197 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6198 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6199 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6200 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6201 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6202 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6203 #define FAC_SFLE        S390_FEAT_STFLE
6204 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6205 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6206 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6207 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6208 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6209 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6210 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6211 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6212 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6213 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6214 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6215 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6216 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6217 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6218 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6219 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6220 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6221 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6222 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6223 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6224 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6225 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6226 
6227 static const DisasInsn insn_info[] = {
6228 #include "insn-data.h.inc"
6229 };
6230 
6231 #undef E
6232 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6233     case OPC: return &insn_info[insn_ ## NM];
6234 
6235 static const DisasInsn *lookup_opc(uint16_t opc)
6236 {
6237     switch (opc) {
6238 #include "insn-data.h.inc"
6239     default:
6240         return NULL;
6241     }
6242 }
6243 
6244 #undef F
6245 #undef E
6246 #undef D
6247 #undef C
6248 
6249 /* Extract a field from the insn.  The INSN should be left-aligned in
6250    the uint64_t so that we can more easily utilize the big-bit-endian
6251    definitions we extract from the Principles of Operation.  */
6252 
6253 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6254 {
6255     uint32_t r, m;
6256 
6257     if (f->size == 0) {
6258         return;
6259     }
6260 
6261     /* Zero extract the field from the insn.  */
6262     r = (insn << f->beg) >> (64 - f->size);
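         /*
          * For example, a 4-bit field at big-bit-endian position 8 comes out
          * as (insn << 8) >> 60: shift the field to the top of the
          * doubleword, then back down to the bottom.
          */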
6263 
6264     /* Sign-extend, or un-swap the field as necessary.  */
6265     switch (f->type) {
6266     case 0: /* unsigned */
6267         break;
6268     case 1: /* signed */
6269         assert(f->size <= 32);
6270         m = 1u << (f->size - 1);
6271         r = (r ^ m) - m;
6272         break;
6273     case 2: /* dl+dh split, signed 20 bit. */
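             /*
              * After the generic extract, the low 8 bits of r hold DH and
              * bits 19:8 hold DL; reassemble them as the signed 20-bit
              * displacement (DH << 12) | DL.
              */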
6274         r = ((int8_t)r << 12) | (r >> 8);
6275         break;
6276     case 3: /* MSB stored in RXB */
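             /*
              * RXB is the byte at instruction bits 36-39; each of its bits
              * supplies the fifth (most significant) bit of one of up to
              * four vector register fields, keyed here by field position.
              */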
6277         g_assert(f->size == 4);
6278         switch (f->beg) {
6279         case 8:
6280             r |= extract64(insn, 63 - 36, 1) << 4;
6281             break;
6282         case 12:
6283             r |= extract64(insn, 63 - 37, 1) << 4;
6284             break;
6285         case 16:
6286             r |= extract64(insn, 63 - 38, 1) << 4;
6287             break;
6288         case 32:
6289             r |= extract64(insn, 63 - 39, 1) << 4;
6290             break;
6291         default:
6292             g_assert_not_reached();
6293         }
6294         break;
6295     default:
6296         abort();
6297     }
6298 
6299     /*
6300      * Validate that the "compressed" encoding we selected above is valid.
6301      * I.e. we haven't made two different original fields overlap.
6302      */
6303     assert(((o->presentC >> f->indexC) & 1) == 0);
6304     o->presentC |= 1 << f->indexC;
6305     o->presentO |= 1 << f->indexO;
6306 
6307     o->c[f->indexC] = r;
6308 }
6309 
6310 /* Look up the insn at the current PC, extracting the operands into
6311    S->FIELDS and returning the insn's info struct, or NULL if invalid.  */
6312 
6313 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6314 {
6315     uint64_t insn, pc = s->base.pc_next;
6316     int op, op2, ilen;
6317     const DisasInsn *info;
6318 
6319     if (unlikely(s->ex_value)) {
6320         /* Drop the EX data now, so that it's clear on exception paths.  */
6321         TCGv_i64 zero = tcg_const_i64(0);
6322         int i;
6323         tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
6324         tcg_temp_free_i64(zero);
6325 
6326         /* Extract the values saved by EXECUTE.  */
6327         insn = s->ex_value & 0xffffffffffff0000ull;
6328         ilen = s->ex_value & 0xf;
6329         /* register insn bytes with translator so plugins work */
6330         for (i = 0; i < ilen; i++) {
6331             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6332             translator_fake_ldb(byte, pc + i);
6333         }
6334         op = insn >> 56;
6335     } else {
6336         insn = ld_code2(env, s, pc);
6337         op = (insn >> 8) & 0xff;
6338         ilen = get_ilen(op);
6339         switch (ilen) {
6340         case 2:
6341             insn = insn << 48;
6342             break;
6343         case 4:
6344             insn = ld_code4(env, s, pc) << 32;
6345             break;
6346         case 6:
6347             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6348             break;
6349         default:
6350             g_assert_not_reached();
6351         }
6352     }
6353     s->pc_tmp = s->base.pc_next + ilen;
6354     s->ilen = ilen;
6355 
6356     /* We can't actually determine the insn format until we've looked up
6357        the full insn opcode, which we can't do without locating the
6358        secondary opcode.  Assume by default that OP2 is at bit 40; for
6359        those smaller insns that don't actually have a secondary opcode
6360        this will correctly result in OP2 = 0. */
6361     switch (op) {
6362     case 0x01: /* E */
6363     case 0x80: /* S */
6364     case 0x82: /* S */
6365     case 0x93: /* S */
6366     case 0xb2: /* S, RRF, RRE, IE */
6367     case 0xb3: /* RRE, RRD, RRF */
6368     case 0xb9: /* RRE, RRF */
6369     case 0xe5: /* SSE, SIL */
6370         op2 = (insn << 8) >> 56;
6371         break;
6372     case 0xa5: /* RI */
6373     case 0xa7: /* RI */
6374     case 0xc0: /* RIL */
6375     case 0xc2: /* RIL */
6376     case 0xc4: /* RIL */
6377     case 0xc6: /* RIL */
6378     case 0xc8: /* SSF */
6379     case 0xcc: /* RIL */
6380         op2 = (insn << 12) >> 60;
6381         break;
6382     case 0xc5: /* MII */
6383     case 0xc7: /* SMI */
6384     case 0xd0 ... 0xdf: /* SS */
6385     case 0xe1: /* SS */
6386     case 0xe2: /* SS */
6387     case 0xe8: /* SS */
6388     case 0xe9: /* SS */
6389     case 0xea: /* SS */
6390     case 0xee ... 0xf3: /* SS */
6391     case 0xf8 ... 0xfd: /* SS */
6392         op2 = 0;
6393         break;
6394     default:
6395         op2 = (insn << 40) >> 56;
6396         break;
6397     }
6398 
6399     memset(&s->fields, 0, sizeof(s->fields));
6400     s->fields.raw_insn = insn;
6401     s->fields.op = op;
6402     s->fields.op2 = op2;
6403 
6404     /* Lookup the instruction.  */
6405     info = lookup_opc(op << 8 | op2);
6406     s->insn = info;
6407 
6408     /* If we found it, extract the operands.  */
6409     if (info != NULL) {
6410         DisasFormat fmt = info->fmt;
6411         int i;
6412 
6413         for (i = 0; i < NUM_C_FIELD; ++i) {
6414             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6415         }
6416     }
6417     return info;
6418 }
6419 
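     /* Without the additional-floating-point facility, only f0, f2, f4 and
        f6 are available; any odd register, or one above 6, is an AFP
        register. */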
6420 static bool is_afp_reg(int reg)
6421 {
6422     return reg % 2 || reg > 6;
6423 }
6424 
6425 static bool is_fp_pair(int reg)
6426 {
6427     /* Valid pairs are 0,1,4,5,8,9,12,13: to exclude the others, it suffices to check that bit 1 is clear. */
6428     return !(reg & 0x2);
6429 }
6430 
6431 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6432 {
6433     const DisasInsn *insn;
6434     DisasJumpType ret = DISAS_NEXT;
6435     DisasOps o = {};
6436     bool icount = false;
6437 
6438     /* Search for the insn in the table.  */
6439     insn = extract_insn(env, s);
6440 
6441     /* Update insn_start now that we know the ILEN.  */
6442     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6443 
6444     /* Not found means unimplemented/illegal opcode.  */
6445     if (insn == NULL) {
6446         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6447                       s->fields.op, s->fields.op2);
6448         gen_illegal_opcode(s);
6449         ret = DISAS_NORETURN;
6450         goto out;
6451     }
6452 
6453 #ifndef CONFIG_USER_ONLY
6454     if (s->base.tb->flags & FLAG_MASK_PER) {
6455         TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
6456         gen_helper_per_ifetch(cpu_env, addr);
6457         tcg_temp_free_i64(addr);
6458     }
6459 #endif
6460 
6461     /* process flags */
6462     if (insn->flags) {
6463         /* privileged instruction */
6464         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6465             gen_program_exception(s, PGM_PRIVILEGED);
6466             ret = DISAS_NORETURN;
6467             goto out;
6468         }
6469 
6470         /* if AFP is not enabled, instructions and registers are forbidden */
6471         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6472             uint8_t dxc = 0;
6473 
6474             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6475                 dxc = 1;
6476             }
6477             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6478                 dxc = 1;
6479             }
6480             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6481                 dxc = 1;
6482             }
6483             if (insn->flags & IF_BFP) {
6484                 dxc = 2;
6485             }
6486             if (insn->flags & IF_DFP) {
6487                 dxc = 3;
6488             }
6489             if (insn->flags & IF_VEC) {
6490                 dxc = 0xfe;
6491             }
6492             if (dxc) {
6493                 gen_data_exception(dxc);
6494                 ret = DISAS_NORETURN;
6495                 goto out;
6496             }
6497         }
6498 
6499         /* if vector instructions not enabled, executing them is forbidden */
6500         if (insn->flags & IF_VEC) {
6501             if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
6502                 gen_data_exception(0xfe);
6503                 ret = DISAS_NORETURN;
6504                 goto out;
6505             }
6506         }
6507 
6508         /* input/output is the special case for icount mode */
6509         if (unlikely(insn->flags & IF_IO)) {
6510             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6511             if (icount) {
6512                 gen_io_start();
6513             }
6514         }
6515     }
6516 
6517     /* Check for insn specification exceptions.  */
6518     if (insn->spec) {
6519         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6520             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6521             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6522             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6523             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6524             gen_program_exception(s, PGM_SPECIFICATION);
6525             ret = DISAS_NORETURN;
6526             goto out;
6527         }
6528     }
6529 
6530     /* Implement the instruction.  */
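         /*
          * The generators run in a fixed order: load the inputs (in1, in2),
          * prepare the destination (prep), perform the operation (op) and,
          * unless the op already ended the TB, write the output (wout) and
          * update the condition code (cout).
          */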
6531     if (insn->help_in1) {
6532         insn->help_in1(s, &o);
6533     }
6534     if (insn->help_in2) {
6535         insn->help_in2(s, &o);
6536     }
6537     if (insn->help_prep) {
6538         insn->help_prep(s, &o);
6539     }
6540     if (insn->help_op) {
6541         ret = insn->help_op(s, &o);
6542     }
6543     if (ret != DISAS_NORETURN) {
6544         if (insn->help_wout) {
6545             insn->help_wout(s, &o);
6546         }
6547         if (insn->help_cout) {
6548             insn->help_cout(s, &o);
6549         }
6550     }
6551 
6552     /* Free any temporaries created by the helpers.  */
6553     if (o.out && !o.g_out) {
6554         tcg_temp_free_i64(o.out);
6555     }
6556     if (o.out2 && !o.g_out2) {
6557         tcg_temp_free_i64(o.out2);
6558     }
6559     if (o.in1 && !o.g_in1) {
6560         tcg_temp_free_i64(o.in1);
6561     }
6562     if (o.in2 && !o.g_in2) {
6563         tcg_temp_free_i64(o.in2);
6564     }
6565     if (o.addr1) {
6566         tcg_temp_free_i64(o.addr1);
6567     }
6568 
6569     /* An I/O insn must be the last one in the TB when icount is enabled. */
6570     if (unlikely(icount && ret == DISAS_NEXT)) {
6571         ret = DISAS_TOO_MANY;
6572     }
6573 
6574 #ifndef CONFIG_USER_ONLY
6575     if (s->base.tb->flags & FLAG_MASK_PER) {
6576         /* An exception might be triggered, save PSW if not already done.  */
6577         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6578             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6579         }
6580 
6581         /* Call the helper to check for a possible PER exception.  */
6582         gen_helper_per_check_exception(cpu_env);
6583     }
6584 #endif
6585 
6586 out:
6587     /* Advance to the next instruction.  */
6588     s->base.pc_next = s->pc_tmp;
6589     return ret;
6590 }
6591 
6592 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6593 {
6594     DisasContext *dc = container_of(dcbase, DisasContext, base);
6595 
6596     /* 31-bit mode */
6597     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6598         dc->base.pc_first &= 0x7fffffff;
6599         dc->base.pc_next = dc->base.pc_first;
6600     }
6601 
6602     dc->cc_op = CC_OP_DYNAMIC;
6603     dc->ex_value = dc->base.tb->cs_base;
6604     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6605 }
6606 
6607 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6608 {
6609 }
6610 
6611 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6612 {
6613     DisasContext *dc = container_of(dcbase, DisasContext, base);
6614 
6615     /* Delay setting ilen until we've read the insn. */
6616     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6617     dc->insn_start = tcg_last_op();
6618 }
6619 
6620 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6621                                 uint64_t pc)
6622 {
6623     uint64_t insn = cpu_lduw_code(env, pc);
6624 
6625     return pc + get_ilen((insn >> 8) & 0xff);
6626 }
6627 
6628 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6629 {
6630     CPUS390XState *env = cs->env_ptr;
6631     DisasContext *dc = container_of(dcbase, DisasContext, base);
6632 
6633     dc->base.is_jmp = translate_one(env, dc);
6634     if (dc->base.is_jmp == DISAS_NEXT) {
6635         if (dc->ex_value ||
6636             !is_same_page(dcbase, dc->base.pc_next) ||
6637             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6638             dc->base.is_jmp = DISAS_TOO_MANY;
6639         }
6640     }
6641 }
6642 
6643 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6644 {
6645     DisasContext *dc = container_of(dcbase, DisasContext, base);
6646 
6647     switch (dc->base.is_jmp) {
6648     case DISAS_NORETURN:
6649         break;
6650     case DISAS_TOO_MANY:
6651         update_psw_addr(dc);
6652         /* FALLTHRU */
6653     case DISAS_PC_UPDATED:
6654         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6655            cc op type is in env */
6656         update_cc_op(dc);
6657         /* FALLTHRU */
6658     case DISAS_PC_CC_UPDATED:
6659         /* Exit the TB, either by raising a debug exception or by return.  */
6660         if (dc->exit_to_mainloop) {
6661             tcg_gen_exit_tb(NULL, 0);
6662         } else {
6663             tcg_gen_lookup_and_goto_ptr();
6664         }
6665         break;
6666     default:
6667         g_assert_not_reached();
6668     }
6669 }
6670 
6671 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6672                                CPUState *cs, FILE *logfile)
6673 {
6674     DisasContext *dc = container_of(dcbase, DisasContext, base);
6675 
6676     if (unlikely(dc->ex_value)) {
6677         /* ??? Unfortunately target_disas can't use host memory.  */
6678         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6679     } else {
6680         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6681         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6682     }
6683 }
6684 
6685 static const TranslatorOps s390x_tr_ops = {
6686     .init_disas_context = s390x_tr_init_disas_context,
6687     .tb_start           = s390x_tr_tb_start,
6688     .insn_start         = s390x_tr_insn_start,
6689     .translate_insn     = s390x_tr_translate_insn,
6690     .tb_stop            = s390x_tr_tb_stop,
6691     .disas_log          = s390x_tr_disas_log,
6692 };
6693 
6694 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
6695                            target_ulong pc, void *host_pc)
6696 {
6697     DisasContext dc;
6698 
6699     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6700 }
6701 
6702 void s390x_restore_state_to_opc(CPUState *cs,
6703                                 const TranslationBlock *tb,
6704                                 const uint64_t *data)
6705 {
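         /* data[] mirrors tcg_gen_insn_start(): pc, cc_op and ilen. */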
6706     S390CPU *cpu = S390_CPU(cs);
6707     CPUS390XState *env = &cpu->env;
6708     int cc_op = data[1];
6709 
6710     env->psw.addr = data[0];
6711 
6712     /* Update the CC opcode if it is not already up-to-date.  */
6713     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6714         env->cc_op = cc_op;
6715     }
6716 
6717     /* Record ILEN.  */
6718     env->int_pgm_ilen = data[2];
6719 }
6720