xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 1a16ce64)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (almost) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each field
 * inside an array indexed by an enum.  In order to conserve memory, we
 * arrange for fields that never exist at the same time to overlap, thus
 * the "C" for compact.  For checking purposes there is also an "O" for
 * original index, which is applied to the availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
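
/*
 * Illustrative example (added for clarity, not from the original source):
 * FLD_C_r2 and FLD_C_b2 both map to compact slot 1.  This appears to work
 * because no instruction format carries both an r2 register field and a
 * b2 base field -- when operand 2 lives in memory it is described by
 * b2/x2/d2, and when it lives in a register it is described by r2.  The
 * "O" index still distinguishes the two, so have_field() can tell which
 * one is actually present.
 */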

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
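
/*
 * Worked example (added for clarity, not from the original source): in
 * 64-bit mode the link info is simply the full 64-bit address.  In 31-bit
 * mode, pc 0x1000 yields 0x80001000 in the low word (the address plus the
 * addressing-mode bit); in 24-bit mode the address is deposited unchanged.
 * In both non-64-bit cases only the low 32 bits of OUT are written,
 * preserving the high half of the register.
 */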

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest
     * 8 bytes of the 16-byte vector, on both little- and big-endian hosts.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8-byte elements have to be loaded separately. Let's force all
     * 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
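
/*
 * Worked example (added for clarity, not from the original source): for a
 * word element (es = MO_32, bytes = 4) with enr = 1, the big-endian offset
 * is 1 * 4 = 4.  On a little-endian host the XOR with (8 - 4) = 4 turns
 * that into 0, which is where element 1 of the first doubleword actually
 * sits once that doubleword is stored in host byte order -- matching the
 * "W" row of the little-endian table above.
 */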

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
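
/*
 * Worked example (added for clarity, not from the original source): in
 * 24-bit addressing mode, with regs[b2] = 0x00fffff0 and d2 = 0x20, the
 * sum 0x01000010 is wrapped by the 0x00ffffff mask in
 * gen_addi_and_wrap_i64() to yield the effective address 0x000010,
 * mirroring how the hardware truncates addresses to the current
 * addressing mode.
 */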

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
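
/*
 * Note (added for clarity; assumes the enum cc_op layout in
 * s390x-internal.h): the "> 3" test excludes CC_OP_CONST0..CC_OP_CONST3,
 * which encode the condition code directly in s->cc_op and therefore
 * keep no live data in cc_src/cc_dst/cc_vr.
 */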

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table mapping branch mask values to comparison codes, for the case
   where the CC was set by a comparison.  For comparisons, CC=3 is not
   possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
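
/*
 * Worked example (added for clarity, not from the original source): the
 * 4-bit branch mask M selects which CC values cause a branch, with
 * bit 8 -> CC0 (equal), 4 -> CC1 (low), 2 -> CC2 (high), 1 -> CC3.
 * Mask 12 (8 | 4, "branch on equal or low") therefore indexes the rows
 * marked "EQ | LT", i.e. TCG_COND_LE.  Entries come in pairs because
 * bit 1 (CC3) is a don't-care for comparisons.
 */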

/* Table mapping branch mask values to comparison codes, for the case
   where the CC was set by a logical operation.  For such, only CC=0
   and CC=1 are possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
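
/*
 * Note (added for clarity; the interpretation of the "type" column is an
 * assumption based on the field-extraction code elsewhere in this file):
 * type 0 appears to be a plain unsigned field, type 1 a signed immediate,
 * type 2 a 20-bit signed displacement stored as split low/high parts, and
 * type 3 a vector register number whose most-significant bit lives in the
 * RXB byte.
 */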

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
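
/*
 * Summary (added for clarity): help_branch() below emits one of three
 * code shapes, depending on what use_goto_tb() allows.  If both the
 * taken and not-taken paths are direct jumps within range, each exit
 * gets its own goto_tb.  If only the fallthrough qualifies, the taken
 * path stores psw_addr and leaves the TB dynamically.  Otherwise a
 * movcond selects the next PC and the TB ends with a dynamic jump.
 */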

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
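
/*
 * Worked example (added for clarity, not from the original source): for
 * CC_OP_STATIC after an ADD LOGICAL, the architected CC values are
 * 0 (zero, no carry), 1 (nonzero, no carry), 2 (zero, carry) and
 * 3 (nonzero, carry), so the carry bit is exactly CC >> 1 -- which is
 * what the shift above extracts into cc_src.
 */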

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
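
/*
 * Note (added for clarity; the facility naming is an assumption): STFLE
 * bit 45 appears to be the interlocked-access facility, which makes
 * ASI/AGSI and friends atomic under concurrent access.  Without it, a
 * plain load/add/store sequence is architecturally sufficient, which is
 * why the non-atomic path above is acceptable.
 */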

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
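
/*
 * Layout note (added for clarity, derived from the arithmetic above): in
 * 24-bit mode the link word built by save_link_info() is the ILC in bits
 * 30-31, the CC in bits 28-29, the program mask in bits 24-27 and the
 * instruction address in bits 0-23 -- the classic BAL link format.
 */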

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. A branch
 *   is considered fixed when it is not EXECUTEd and its DisasContext *S
 *   contains the relative immediate field RI; all other branches are
 *   considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
1560 
1561 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1562 {
1563     DisasCompare c;
1564     bool is_imm;
1565     int imm;
1566 
1567     pc_to_link_info(o->out, s, s->pc_tmp);
1568 
1569     disas_jdest(s, i2, is_imm, imm, o->in2);
1570     disas_jcc(s, &c, 0xf);
1571     return help_branch(s, &c, is_imm, imm, o->in2);
1572 }
1573 
1574 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1575 {
1576     int m1 = get_field(s, m1);
1577     DisasCompare c;
1578     bool is_imm;
1579     int imm;
1580 
1581     /* BCR with R2 = 0 causes no branching */
1582     if (have_field(s, r2) && get_field(s, r2) == 0) {
1583         if (m1 == 14) {
1584             /* Perform serialization */
1585             /* FIXME: check for fast-BCR-serialization facility */
1586             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1587         }
1588         if (m1 == 15) {
1589             /* Perform serialization */
1590             /* FIXME: perform checkpoint-synchronisation */
1591             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1592         }
1593         return DISAS_NEXT;
1594     }
1595 
1596     disas_jdest(s, i2, is_imm, imm, o->in2);
1597     disas_jcc(s, &c, m1);
1598     return help_branch(s, &c, is_imm, imm, o->in2);
1599 }
1600 
1601 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1602 {
1603     int r1 = get_field(s, r1);
1604     DisasCompare c;
1605     bool is_imm;
1606     TCGv_i64 t;
1607     int imm;
1608 
1609     c.cond = TCG_COND_NE;
1610     c.is_64 = false;
1611 
1612     t = tcg_temp_new_i64();
1613     tcg_gen_subi_i64(t, regs[r1], 1);
1614     store_reg32_i64(r1, t);
1615     c.u.s32.a = tcg_temp_new_i32();
1616     c.u.s32.b = tcg_constant_i32(0);
1617     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1618 
1619     disas_jdest(s, i2, is_imm, imm, o->in2);
1620     return help_branch(s, &c, is_imm, imm, o->in2);
1621 }
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_shri_i64(t, regs[r1], 32);
1635     tcg_gen_subi_i64(t, t, 1);
1636     store_reg32h_i64(r1, t);
1637     c.u.s32.a = tcg_temp_new_i32();
1638     c.u.s32.b = tcg_constant_i32(0);
1639     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1640 
1641     return help_branch(s, &c, 1, imm, o->in2);
1642 }
1643 
1644 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1645 {
1646     int r1 = get_field(s, r1);
1647     DisasCompare c;
1648     bool is_imm;
1649     int imm;
1650 
1651     c.cond = TCG_COND_NE;
1652     c.is_64 = true;
1653 
1654     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1655     c.u.s64.a = regs[r1];
1656     c.u.s64.b = tcg_constant_i64(0);
1657 
1658     disas_jdest(s, i2, is_imm, imm, o->in2);
1659     return help_branch(s, &c, is_imm, imm, o->in2);
1660 }
1661 
1662 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1663 {
1664     int r1 = get_field(s, r1);
1665     int r3 = get_field(s, r3);
1666     DisasCompare c;
1667     bool is_imm;
1668     TCGv_i64 t;
1669     int imm;
1670 
1671     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1672     c.is_64 = false;
1673 
1674     t = tcg_temp_new_i64();
1675     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1676     c.u.s32.a = tcg_temp_new_i32();
1677     c.u.s32.b = tcg_temp_new_i32();
1678     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1679     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1680     store_reg32_i64(r1, t);
1681 
1682     disas_jdest(s, i2, is_imm, imm, o->in2);
1683     return help_branch(s, &c, is_imm, imm, o->in2);
1684 }
1685 
1686 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1687 {
1688     int r1 = get_field(s, r1);
1689     int r3 = get_field(s, r3);
1690     DisasCompare c;
1691     bool is_imm;
1692     int imm;
1693 
1694     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1695     c.is_64 = true;
1696 
1697     if (r1 == (r3 | 1)) {
1698         c.u.s64.b = load_reg(r3 | 1);
1699     } else {
1700         c.u.s64.b = regs[r3 | 1];
1701     }
1702 
1703     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1704     c.u.s64.a = regs[r1];
1705 
1706     disas_jdest(s, i2, is_imm, imm, o->in2);
1707     return help_branch(s, &c, is_imm, imm, o->in2);
1708 }
1709 
1710 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1711 {
1712     int imm, m3 = get_field(s, m3);
1713     bool is_imm;
1714     DisasCompare c;
1715 
1716     c.cond = ltgt_cond[m3];
1717     if (s->insn->data) {
1718         c.cond = tcg_unsigned_cond(c.cond);
1719     }
1720     c.is_64 = true;
1721     c.u.s64.a = o->in1;
1722     c.u.s64.b = o->in2;
1723 
1724     o->out = NULL;
1725     disas_jdest(s, i4, is_imm, imm, o->out);
1726     if (!is_imm && !o->out) {
1727         imm = 0;
1728         o->out = get_address(s, 0, get_field(s, b4),
1729                              get_field(s, d4));
1730     }
1731 
1732     return help_branch(s, &c, is_imm, imm, o->out);
1733 }
1734 
1735 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1736 {
1737     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1738     set_cc_static(s);
1739     return DISAS_NEXT;
1740 }
1741 
1742 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1743 {
1744     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1745     set_cc_static(s);
1746     return DISAS_NEXT;
1747 }
1748 
1749 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1750 {
1751     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1752     set_cc_static(s);
1753     return DISAS_NEXT;
1754 }
1755 
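/* Extract and validate the m3 (rounding mode) and m4 fields of a
   floating-point instruction, returning them packed as m3 | (m4 << 4),
   or NULL after raising a specification exception.  */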
1756 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1757                                    bool m4_with_fpe)
1758 {
1759     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1760     uint8_t m3 = get_field(s, m3);
1761     uint8_t m4 = get_field(s, m4);
1762 
1763     /* m3 field was introduced with FPE */
1764     if (!fpe && m3_with_fpe) {
1765         m3 = 0;
1766     }
1767     /* m4 field was introduced with FPE */
1768     if (!fpe && m4_with_fpe) {
1769         m4 = 0;
1770     }
1771 
1772     /* Check for valid rounding modes. Mode 3 was introduced with FPE. */
1773     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1774         gen_program_exception(s, PGM_SPECIFICATION);
1775         return NULL;
1776     }
1777 
1778     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1779 }
1780 
1781 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1782 {
1783     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1784 
1785     if (!m34) {
1786         return DISAS_NORETURN;
1787     }
1788     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1789     set_cc_static(s);
1790     return DISAS_NEXT;
1791 }
1792 
1793 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1794 {
1795     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1796 
1797     if (!m34) {
1798         return DISAS_NORETURN;
1799     }
1800     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1801     set_cc_static(s);
1802     return DISAS_NEXT;
1803 }
1804 
1805 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1806 {
1807     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1808 
1809     if (!m34) {
1810         return DISAS_NORETURN;
1811     }
1812     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1813     set_cc_static(s);
1814     return DISAS_NEXT;
1815 }
1816 
1817 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1818 {
1819     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1820 
1821     if (!m34) {
1822         return DISAS_NORETURN;
1823     }
1824     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1825     set_cc_static(s);
1826     return DISAS_NEXT;
1827 }
1828 
1829 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1830 {
1831     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1832 
1833     if (!m34) {
1834         return DISAS_NORETURN;
1835     }
1836     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1837     set_cc_static(s);
1838     return DISAS_NEXT;
1839 }
1840 
1841 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1842 {
1843     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1844 
1845     if (!m34) {
1846         return DISAS_NORETURN;
1847     }
1848     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1849     set_cc_static(s);
1850     return DISAS_NEXT;
1851 }
1852 
1853 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1854 {
1855     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1856 
1857     if (!m34) {
1858         return DISAS_NORETURN;
1859     }
1860     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1861     set_cc_static(s);
1862     return DISAS_NEXT;
1863 }
1864 
1865 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1866 {
1867     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1868 
1869     if (!m34) {
1870         return DISAS_NORETURN;
1871     }
1872     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1873     set_cc_static(s);
1874     return DISAS_NEXT;
1875 }
1876 
1877 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1878 {
1879     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1880 
1881     if (!m34) {
1882         return DISAS_NORETURN;
1883     }
1884     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1885     set_cc_static(s);
1886     return DISAS_NEXT;
1887 }
1888 
1889 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1890 {
1891     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1892 
1893     if (!m34) {
1894         return DISAS_NORETURN;
1895     }
1896     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1897     set_cc_static(s);
1898     return DISAS_NEXT;
1899 }
1900 
1901 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1902 {
1903     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1904 
1905     if (!m34) {
1906         return DISAS_NORETURN;
1907     }
1908     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1909     set_cc_static(s);
1910     return DISAS_NEXT;
1911 }
1912 
1913 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1914 {
1915     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1916 
1917     if (!m34) {
1918         return DISAS_NORETURN;
1919     }
1920     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1921     set_cc_static(s);
1922     return DISAS_NEXT;
1923 }
1924 
1925 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1926 {
1927     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1928 
1929     if (!m34) {
1930         return DISAS_NORETURN;
1931     }
1932     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1933     return DISAS_NEXT;
1934 }
1935 
1936 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1937 {
1938     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1939 
1940     if (!m34) {
1941         return DISAS_NORETURN;
1942     }
1943     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1944     return DISAS_NEXT;
1945 }
1946 
1947 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1948 {
1949     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1950 
1951     if (!m34) {
1952         return DISAS_NORETURN;
1953     }
1954     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1955     return DISAS_NEXT;
1956 }
1957 
1958 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1959 {
1960     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1961 
1962     if (!m34) {
1963         return DISAS_NORETURN;
1964     }
1965     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1966     return DISAS_NEXT;
1967 }
1968 
1969 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1970 {
1971     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1972 
1973     if (!m34) {
1974         return DISAS_NORETURN;
1975     }
1976     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1977     return DISAS_NEXT;
1978 }
1979 
1980 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1981 {
1982     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1983 
1984     if (!m34) {
1985         return DISAS_NORETURN;
1986     }
1987     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1988     return DISAS_NEXT;
1989 }
1990 
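/* CHECKSUM (CKSM): the helper returns the new checksum and the number
   of bytes processed as a 128-bit pair; advance the r2 address/length
   pair accordingly.  */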
1991 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1992 {
1993     int r2 = get_field(s, r2);
1994     TCGv_i128 pair = tcg_temp_new_i128();
1995     TCGv_i64 len = tcg_temp_new_i64();
1996 
1997     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1998     set_cc_static(s);
1999     tcg_gen_extr_i128_i64(o->out, len, pair);
2000 
2001     tcg_gen_add_i64(regs[r2], regs[r2], len);
2002     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2003 
2004     return DISAS_NEXT;
2005 }
2006 
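/* COMPARE LOGICAL (CLC): operand lengths of 1, 2, 4 or 8 bytes are
   inlined as two loads plus a CC computation; all other lengths go
   through the helper.  */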
2007 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2008 {
2009     int l = get_field(s, l1);
2010     TCGv_i32 vl;
2011     MemOp mop;
2012 
2013     switch (l + 1) {
2014     case 1:
2015     case 2:
2016     case 4:
2017     case 8:
2018         mop = ctz32(l + 1) | MO_TE;
2019         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2020         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2021         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2022         return DISAS_NEXT;
2023     default:
2024         vl = tcg_constant_i32(l);
2025         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2026         set_cc_static(s);
2027         return DISAS_NEXT;
2028     }
2029 }
2030 
2031 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2032 {
2033     int r1 = get_field(s, r1);
2034     int r2 = get_field(s, r2);
2035     TCGv_i32 t1, t2;
2036 
2037     /* r1 and r2 must be even.  */
2038     if (r1 & 1 || r2 & 1) {
2039         gen_program_exception(s, PGM_SPECIFICATION);
2040         return DISAS_NORETURN;
2041     }
2042 
2043     t1 = tcg_constant_i32(r1);
2044     t2 = tcg_constant_i32(r2);
2045     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2046     set_cc_static(s);
2047     return DISAS_NEXT;
2048 }
2049 
2050 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2051 {
2052     int r1 = get_field(s, r1);
2053     int r3 = get_field(s, r3);
2054     TCGv_i32 t1, t3;
2055 
2056     /* r1 and r3 must be even.  */
2057     if (r1 & 1 || r3 & 1) {
2058         gen_program_exception(s, PGM_SPECIFICATION);
2059         return DISAS_NORETURN;
2060     }
2061 
2062     t1 = tcg_constant_i32(r1);
2063     t3 = tcg_constant_i32(r3);
2064     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2065     set_cc_static(s);
2066     return DISAS_NEXT;
2067 }
2068 
2069 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2070 {
2071     int r1 = get_field(s, r1);
2072     int r3 = get_field(s, r3);
2073     TCGv_i32 t1, t3;
2074 
2075     /* r1 and r3 must be even.  */
2076     if (r1 & 1 || r3 & 1) {
2077         gen_program_exception(s, PGM_SPECIFICATION);
2078         return DISAS_NORETURN;
2079     }
2080 
2081     t1 = tcg_constant_i32(r1);
2082     t3 = tcg_constant_i32(r3);
2083     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2084     set_cc_static(s);
2085     return DISAS_NEXT;
2086 }
2087 
2088 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2089 {
2090     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2091     TCGv_i32 t1 = tcg_temp_new_i32();
2092 
2093     tcg_gen_extrl_i64_i32(t1, o->in1);
2094     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2095     set_cc_static(s);
2096     return DISAS_NEXT;
2097 }
2098 
2099 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2100 {
2101     TCGv_i128 pair = tcg_temp_new_i128();
2102 
2103     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2104     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2105 
2106     set_cc_static(s);
2107     return DISAS_NEXT;
2108 }
2109 
2110 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2111 {
2112     TCGv_i64 t = tcg_temp_new_i64();
2113     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2114     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2115     tcg_gen_or_i64(o->out, o->out, t);
2116     return DISAS_NEXT;
2117 }
2118 
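/* COMPARE AND SWAP (CS, CSY, CSG), via an atomic cmpxchg.  */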
2119 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2120 {
2121     int d2 = get_field(s, d2);
2122     int b2 = get_field(s, b2);
2123     TCGv_i64 addr, cc;
2124 
2125     /* Note that in1 = R3 (new value) and
2126        in2 = (zero-extended) R1 (expected value).  */
2127 
2128     addr = get_address(s, 0, b2, d2);
2129     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2130                                get_mem_index(s), s->insn->data | MO_ALIGN);
2131 
2132     /* Are the memory and expected values (un)equal?  Note that this setcond
2133        produces the output CC value, thus the NE sense of the test.  */
2134     cc = tcg_temp_new_i64();
2135     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2136     tcg_gen_extrl_i64_i32(cc_op, cc);
2137     set_cc_static(s);
2138 
2139     return DISAS_NEXT;
2140 }
2141 
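/* COMPARE DOUBLE AND SWAP (CDSG): a 128-bit cmpxchg against the even/odd
   register pair r1, with the new value taken from the r3 pair.  */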
2142 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2143 {
2144     int r1 = get_field(s, r1);
2145 
2146     o->out_128 = tcg_temp_new_i128();
2147     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2148 
2149     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2150     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2151                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2152 
2153     /*
2154      * Extract result into cc_dst:cc_src, compare vs the expected value
2155      * in the as yet unmodified input registers, then update CC_OP.
2156      */
2157     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2158     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2159     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2160     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2161     set_cc_nz_u64(s, cc_dst);
2162 
2163     return DISAS_NEXT;
2164 }
2165 
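/* COMPARE AND SWAP AND STORE (CSST), implemented entirely in helpers,
   with a separate variant for the parallel case.  */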
2166 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2167 {
2168     int r3 = get_field(s, r3);
2169     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2170 
2171     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2172         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2173     } else {
2174         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2175     }
2176 
2177     set_cc_static(s);
2178     return DISAS_NEXT;
2179 }
2180 
2181 #ifndef CONFIG_USER_ONLY
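/* COMPARE AND SWAP AND PURGE (CSP, CSPG): like CS, but if the compare
   succeeded and the low bit of r2 is set, all TLBs are purged.  */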
2182 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2183 {
2184     MemOp mop = s->insn->data;
2185     TCGv_i64 addr, old, cc;
2186     TCGLabel *lab = gen_new_label();
2187 
2188     /* Note that in1 = R1 (zero-extended expected value),
2189        out = R1 (original reg), out2 = R1+1 (new value).  */
2190 
2191     addr = tcg_temp_new_i64();
2192     old = tcg_temp_new_i64();
2193     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2194     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2195                                get_mem_index(s), mop | MO_ALIGN);
2196 
2197     /* Are the memory and expected values (un)equal?  */
2198     cc = tcg_temp_new_i64();
2199     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2200     tcg_gen_extrl_i64_i32(cc_op, cc);
2201 
2202     /* Write back the output now, before the following branch,
2203        so that we don't need local temps.  */
2204     if ((mop & MO_SIZE) == MO_32) {
2205         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2206     } else {
2207         tcg_gen_mov_i64(o->out, old);
2208     }
2209 
2210     /* If the comparison was equal, and the LSB of R2 was set,
2211        then we need to flush the TLB (for all cpus).  */
2212     tcg_gen_xori_i64(cc, cc, 1);
2213     tcg_gen_and_i64(cc, cc, o->in2);
2214     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2215 
2216     gen_helper_purge(cpu_env);
2217     gen_set_label(lab);
2218 
2219     return DISAS_NEXT;
2220 }
2221 #endif
2222 
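/* CONVERT TO DECIMAL (CVD): convert the 32-bit first operand to packed
   decimal and store the 8-byte result.  */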
2223 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2224 {
2225     TCGv_i64 t1 = tcg_temp_new_i64();
2226     TCGv_i32 t2 = tcg_temp_new_i32();
2227     tcg_gen_extrl_i64_i32(t2, o->in1);
2228     gen_helper_cvd(t1, t2);
2229     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2230     return DISAS_NEXT;
2231 }
2232 
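/* COMPARE AND TRAP: branch past gen_trap() when the inverted comparison
   condition holds.  */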
2233 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2234 {
2235     int m3 = get_field(s, m3);
2236     TCGLabel *lab = gen_new_label();
2237     TCGCond c;
2238 
2239     c = tcg_invert_cond(ltgt_cond[m3]);
2240     if (s->insn->data) {
2241         c = tcg_unsigned_cond(c);
2242     }
2243     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2244 
2245     /* Trap.  */
2246     gen_trap(s);
2247 
2248     gen_set_label(lab);
2249     return DISAS_NEXT;
2250 }
2251 
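/* CONVERT UTF (CU12, CU14, CU21, CU24, CU41, CU42): insn->data encodes
   the source/destination formats, e.g. 12 converts UTF-8 to UTF-16.
   The m3 well-formedness check is honored only with the ETF3
   enhancement.  */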
2252 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2253 {
2254     int m3 = get_field(s, m3);
2255     int r1 = get_field(s, r1);
2256     int r2 = get_field(s, r2);
2257     TCGv_i32 tr1, tr2, chk;
2258 
2259     /* R1 and R2 must both be even.  */
2260     if ((r1 | r2) & 1) {
2261         gen_program_exception(s, PGM_SPECIFICATION);
2262         return DISAS_NORETURN;
2263     }
2264     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2265         m3 = 0;
2266     }
2267 
2268     tr1 = tcg_constant_i32(r1);
2269     tr2 = tcg_constant_i32(r2);
2270     chk = tcg_constant_i32(m3);
2271 
2272     switch (s->insn->data) {
2273     case 12:
2274         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2275         break;
2276     case 14:
2277         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2278         break;
2279     case 21:
2280         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2281         break;
2282     case 24:
2283         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2284         break;
2285     case 41:
2286         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2287         break;
2288     case 42:
2289         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2290         break;
2291     default:
2292         g_assert_not_reached();
2293     }
2294 
2295     set_cc_static(s);
2296     return DISAS_NEXT;
2297 }
2298 
2299 #ifndef CONFIG_USER_ONLY
2300 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2301 {
2302     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2303     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2304     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2305 
2306     gen_helper_diag(cpu_env, r1, r3, func_code);
2307     return DISAS_NEXT;
2308 }
2309 #endif
2310 
2311 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2312 {
2313     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2314     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2315     return DISAS_NEXT;
2316 }
2317 
2318 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2319 {
2320     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2321     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2322     return DISAS_NEXT;
2323 }
2324 
2325 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2326 {
2327     TCGv_i128 t = tcg_temp_new_i128();
2328 
2329     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2330     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2335 {
2336     TCGv_i128 t = tcg_temp_new_i128();
2337 
2338     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2339     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2340     return DISAS_NEXT;
2341 }
2342 
2343 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2344 {
2345     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2346     return DISAS_NEXT;
2347 }
2348 
2349 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2350 {
2351     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2352     return DISAS_NEXT;
2353 }
2354 
2355 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2356 {
2357     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2358     return DISAS_NEXT;
2359 }
2360 
2361 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2362 {
2363     int r2 = get_field(s, r2);
2364     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2365     return DISAS_NEXT;
2366 }
2367 
2368 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2369 {
2370     /* No cache information provided.  */
2371     tcg_gen_movi_i64(o->out, -1);
2372     return DISAS_NEXT;
2373 }
2374 
2375 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2376 {
2377     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2378     return DISAS_NEXT;
2379 }
2380 
2381 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2382 {
2383     int r1 = get_field(s, r1);
2384     int r2 = get_field(s, r2);
2385     TCGv_i64 t = tcg_temp_new_i64();
2386     TCGv_i64 t_cc = tcg_temp_new_i64();
2387 
2388     /* Note the "subsequently" in the Principles of Operation, which implies
2389        a defined result if r1 == r2, so we cannot defer these writes to an
           output hook.  */
2390     gen_op_calc_cc(s);
2391     tcg_gen_extu_i32_i64(t_cc, cc_op);
2392     tcg_gen_shri_i64(t, psw_mask, 32);
2393     tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
2394     store_reg32_i64(r1, t);
2395     if (r2 != 0) {
2396         store_reg32_i64(r2, psw_mask);
2397     }
2398     return DISAS_NEXT;
2399 }
2400 
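/* EXECUTE (EX, EXRL): the helper fetches the target instruction,
   modifies it with the low byte of r1 and latches it in ex_value, so
   that the next translation step executes it in place.  Nested EXECUTE
   is rejected below.  */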
2401 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2402 {
2403     int r1 = get_field(s, r1);
2404     TCGv_i32 ilen;
2405     TCGv_i64 v1;
2406 
2407     /* Nested EXECUTE is not allowed.  */
2408     if (unlikely(s->ex_value)) {
2409         gen_program_exception(s, PGM_EXECUTE);
2410         return DISAS_NORETURN;
2411     }
2412 
2413     update_psw_addr(s);
2414     update_cc_op(s);
2415 
2416     if (r1 == 0) {
2417         v1 = tcg_constant_i64(0);
2418     } else {
2419         v1 = regs[r1];
2420     }
2421 
2422     ilen = tcg_constant_i32(s->ilen);
2423     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2424 
2425     return DISAS_PC_CC_UPDATED;
2426 }
2427 
2428 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2429 {
2430     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2431 
2432     if (!m34) {
2433         return DISAS_NORETURN;
2434     }
2435     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2436     return DISAS_NEXT;
2437 }
2438 
2439 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2440 {
2441     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2442 
2443     if (!m34) {
2444         return DISAS_NORETURN;
2445     }
2446     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2447     return DISAS_NEXT;
2448 }
2449 
2450 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2451 {
2452     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2453 
2454     if (!m34) {
2455         return DISAS_NORETURN;
2456     }
2457     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2458     return DISAS_NEXT;
2459 }
2460 
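/* FIND LEFTMOST ONE (FLOGR): e.g. for input 0x0010000000000000, R1 gets
   the leading-zero count 11 and R1+1 gets the input with that bit
   cleared, i.e. 0.  */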
2461 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2462 {
2463     /* We'll use the original input for cc computation, since we get to
2464        compare that against 0, which ought to be better than comparing
2465        the real output against 64.  It also lets cc_dst be a convenient
2466        temporary during our computation.  */
2467     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2468 
2469     /* R1 = IN ? CLZ(IN) : 64.  */
2470     tcg_gen_clzi_i64(o->out, o->in2, 64);
2471 
2472     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2473        value by 64, which is undefined.  But since the shift is 64 iff the
2474        input is zero, we still get the correct result after and'ing.  */
2475     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2476     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2477     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2478     return DISAS_NEXT;
2479 }
2480 
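/* INSERT CHARACTERS UNDER MASK (ICM, ICMH): contiguous masks become a
   single load plus deposit, sparse masks a byte-by-byte sequence.
   insn->data is the LSB position of the 32-bit field being inserted
   into (0 for ICM, 32 for ICMH).  */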
2481 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2482 {
2483     int m3 = get_field(s, m3);
2484     int pos, len, base = s->insn->data;
2485     TCGv_i64 tmp = tcg_temp_new_i64();
2486     uint64_t ccm;
2487 
2488     switch (m3) {
2489     case 0xf:
2490         /* Effectively a 32-bit load.  */
2491         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2492         len = 32;
2493         goto one_insert;
2494 
2495     case 0xc:
2496     case 0x6:
2497     case 0x3:
2498         /* Effectively a 16-bit load.  */
2499         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2500         len = 16;
2501         goto one_insert;
2502 
2503     case 0x8:
2504     case 0x4:
2505     case 0x2:
2506     case 0x1:
2507         /* Effectively an 8-bit load.  */
2508         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2509         len = 8;
2510         goto one_insert;
2511 
2512     one_insert:
2513         pos = base + ctz32(m3) * 8;
2514         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2515         ccm = ((1ull << len) - 1) << pos;
2516         break;
2517 
2518     case 0:
2519         /* Recognize access exceptions for the first byte.  */
2520         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2521         gen_op_movi_cc(s, 0);
2522         return DISAS_NEXT;
2523 
2524     default:
2525         /* This is going to be a sequence of loads and inserts.  */
2526         pos = base + 32 - 8;
2527         ccm = 0;
2528         while (m3) {
2529             if (m3 & 0x8) {
2530                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2531                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2532                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2533                 ccm |= 0xffull << pos;
2534             }
2535             m3 = (m3 << 1) & 0xf;
2536             pos -= 8;
2537         }
2538         break;
2539     }
2540 
2541     tcg_gen_movi_i64(tmp, ccm);
2542     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2543     return DISAS_NEXT;
2544 }
2545 
2546 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2547 {
2548     int shift = s->insn->data & 0xff;
2549     int size = s->insn->data >> 8;
2550     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2551     return DISAS_NEXT;
2552 }
2553 
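/* INSERT PROGRAM MASK (IPM): assemble the CC and program mask into one
   byte and deposit it into bits 32-39 of r1.  */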
2554 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2555 {
2556     TCGv_i64 t1, t2;
2557 
2558     gen_op_calc_cc(s);
2559     t1 = tcg_temp_new_i64();
2560     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2561     t2 = tcg_temp_new_i64();
2562     tcg_gen_extu_i32_i64(t2, cc_op);
2563     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2564     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2565     return DISAS_NEXT;
2566 }
2567 
2568 #ifndef CONFIG_USER_ONLY
2569 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2570 {
2571     TCGv_i32 m4;
2572 
2573     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2574         m4 = tcg_constant_i32(get_field(s, m4));
2575     } else {
2576         m4 = tcg_constant_i32(0);
2577     }
2578     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2579     return DISAS_NEXT;
2580 }
2581 
2582 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2583 {
2584     TCGv_i32 m4;
2585 
2586     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2587         m4 = tcg_constant_i32(get_field(s, m4));
2588     } else {
2589         m4 = tcg_constant_i32(0);
2590     }
2591     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2592     return DISAS_NEXT;
2593 }
2594 
2595 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2596 {
2597     gen_helper_iske(o->out, cpu_env, o->in2);
2598     return DISAS_NEXT;
2599 }
2600 #endif
2601 
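/* Message-security-assist functions (KM, KMC, KIMD, KMA, PPNO, ...):
   enforce the per-type register constraints, then defer to the common
   msa helper.  */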
2602 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2603 {
2604     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2605     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2606     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2607     TCGv_i32 t_r1, t_r2, t_r3, type;
2608 
2609     switch (s->insn->data) {
2610     case S390_FEAT_TYPE_KMA:
2611         if (r3 == r1 || r3 == r2) {
2612             gen_program_exception(s, PGM_SPECIFICATION);
2613             return DISAS_NORETURN;
2614         }
2615         /* FALL THROUGH */
2616     case S390_FEAT_TYPE_KMCTR:
2617         if (r3 & 1 || !r3) {
2618             gen_program_exception(s, PGM_SPECIFICATION);
2619             return DISAS_NORETURN;
2620         }
2621         /* FALL THROUGH */
2622     case S390_FEAT_TYPE_PPNO:
2623     case S390_FEAT_TYPE_KMF:
2624     case S390_FEAT_TYPE_KMC:
2625     case S390_FEAT_TYPE_KMO:
2626     case S390_FEAT_TYPE_KM:
2627         if (r1 & 1 || !r1) {
2628             gen_program_exception(s, PGM_SPECIFICATION);
2629             return DISAS_NORETURN;
2630         }
2631         /* FALL THROUGH */
2632     case S390_FEAT_TYPE_KMAC:
2633     case S390_FEAT_TYPE_KIMD:
2634     case S390_FEAT_TYPE_KLMD:
2635         if (r2 & 1 || !r2) {
2636             gen_program_exception(s, PGM_SPECIFICATION);
2637             return DISAS_NORETURN;
2638         }
2639         /* FALL THROUGH */
2640     case S390_FEAT_TYPE_PCKMO:
2641     case S390_FEAT_TYPE_PCC:
2642         break;
2643     default:
2644         g_assert_not_reached();
2645     }
2646 
2647     t_r1 = tcg_constant_i32(r1);
2648     t_r2 = tcg_constant_i32(r2);
2649     t_r3 = tcg_constant_i32(r3);
2650     type = tcg_constant_i32(s->insn->data);
2651     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2652     set_cc_static(s);
2653     return DISAS_NEXT;
2654 }
2655 
2656 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2657 {
2658     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2659     set_cc_static(s);
2660     return DISAS_NEXT;
2661 }
2662 
2663 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2664 {
2665     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2666     set_cc_static(s);
2667     return DISAS_NEXT;
2668 }
2669 
2670 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2671 {
2672     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2673     set_cc_static(s);
2674     return DISAS_NEXT;
2675 }
2676 
2677 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2678 {
2679     /* The real output is indeed the original value in memory,
2680        as returned by the atomic fetch-add.  */
2681     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2682                                  s->insn->data | MO_ALIGN);
2683     /* However, we need to recompute the addition for setting CC.  */
2684     tcg_gen_add_i64(o->out, o->in1, o->in2);
2685     return DISAS_NEXT;
2686 }
2687 
2688 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2689 {
2690     /* The real output is indeed the original value in memory,
2691        as returned by the atomic fetch-and.  */
2692     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2693                                  s->insn->data | MO_ALIGN);
2694     /* However, we need to recompute the operation for setting CC.  */
2695     tcg_gen_and_i64(o->out, o->in1, o->in2);
2696     return DISAS_NEXT;
2697 }
2698 
2699 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2700 {
2701     /* The real output is indeed the original value in memory,
2702        as returned by the atomic fetch-or.  */
2703     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2704                                 s->insn->data | MO_ALIGN);
2705     /* However, we need to recompute the operation for setting CC.  */
2706     tcg_gen_or_i64(o->out, o->in1, o->in2);
2707     return DISAS_NEXT;
2708 }
2709 
2710 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2711 {
2712     /* The real output is indeed the original value in memory,
2713        as returned by the atomic fetch-xor.  */
2714     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2715                                  s->insn->data | MO_ALIGN);
2716     /* However, we need to recompute the operation for setting CC.  */
2717     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2718     return DISAS_NEXT;
2719 }
2720 
2721 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2722 {
2723     gen_helper_ldeb(o->out, cpu_env, o->in2);
2724     return DISAS_NEXT;
2725 }
2726 
2727 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2728 {
2729     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2730 
2731     if (!m34) {
2732         return DISAS_NORETURN;
2733     }
2734     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2735     return DISAS_NEXT;
2736 }
2737 
2738 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2739 {
2740     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2741 
2742     if (!m34) {
2743         return DISAS_NORETURN;
2744     }
2745     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2746     return DISAS_NEXT;
2747 }
2748 
2749 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2750 {
2751     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2752 
2753     if (!m34) {
2754         return DISAS_NORETURN;
2755     }
2756     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2757     return DISAS_NEXT;
2758 }
2759 
2760 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2761 {
2762     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2763     return DISAS_NEXT;
2764 }
2765 
2766 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2767 {
2768     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2769     return DISAS_NEXT;
2770 }
2771 
2772 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2773 {
2774     tcg_gen_shli_i64(o->out, o->in2, 32);
2775     return DISAS_NEXT;
2776 }
2777 
2778 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2779 {
2780     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2781     return DISAS_NEXT;
2782 }
2783 
2784 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2785 {
2786     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2787     return DISAS_NEXT;
2788 }
2789 
2790 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2791 {
2792     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2793     return DISAS_NEXT;
2794 }
2795 
2796 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2797 {
2798     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2799     return DISAS_NEXT;
2800 }
2801 
2802 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2803 {
2804     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2805     return DISAS_NEXT;
2806 }
2807 
2808 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2809 {
2810     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2811                        MO_TESL | s->insn->data);
2812     return DISAS_NEXT;
2813 }
2814 
2815 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2816 {
2817     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2818                        MO_TEUL | s->insn->data);
2819     return DISAS_NEXT;
2820 }
2821 
2822 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2823 {
2824     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2825                         MO_TEUQ | s->insn->data);
2826     return DISAS_NEXT;
2827 }
2828 
2829 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2830 {
2831     TCGLabel *lab = gen_new_label();
2832     store_reg32_i64(get_field(s, r1), o->in2);
2833     /* The value is stored even in case of trap. */
2834     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2835     gen_trap(s);
2836     gen_set_label(lab);
2837     return DISAS_NEXT;
2838 }
2839 
2840 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2841 {
2842     TCGLabel *lab = gen_new_label();
2843     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2844     /* The value is stored even in case of trap. */
2845     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2846     gen_trap(s);
2847     gen_set_label(lab);
2848     return DISAS_NEXT;
2849 }
2850 
2851 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2852 {
2853     TCGLabel *lab = gen_new_label();
2854     store_reg32h_i64(get_field(s, r1), o->in2);
2855     /* The value is stored even in case of trap. */
2856     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2857     gen_trap(s);
2858     gen_set_label(lab);
2859     return DISAS_NEXT;
2860 }
2861 
2862 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2863 {
2864     TCGLabel *lab = gen_new_label();
2865 
2866     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2867     /* The value is stored even in case of trap. */
2868     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2869     gen_trap(s);
2870     gen_set_label(lab);
2871     return DISAS_NEXT;
2872 }
2873 
2874 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2875 {
2876     TCGLabel *lab = gen_new_label();
2877     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2878     /* The value is stored even in case of trap. */
2879     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2880     gen_trap(s);
2881     gen_set_label(lab);
2882     return DISAS_NEXT;
2883 }
2884 
2885 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2886 {
2887     DisasCompare c;
2888 
2889     if (have_field(s, m3)) {
2890         /* LOAD * ON CONDITION */
2891         disas_jcc(s, &c, get_field(s, m3));
2892     } else {
2893         /* SELECT */
2894         disas_jcc(s, &c, get_field(s, m4));
2895     }
2896 
2897     if (c.is_64) {
2898         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2899                             o->in2, o->in1);
2900     } else {
2901         TCGv_i32 t32 = tcg_temp_new_i32();
2902         TCGv_i64 t, z;
2903 
2904         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2905 
2906         t = tcg_temp_new_i64();
2907         tcg_gen_extu_i32_i64(t, t32);
2908 
2909         z = tcg_constant_i64(0);
2910         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2911     }
2912 
2913     return DISAS_NEXT;
2914 }
2915 
2916 #ifndef CONFIG_USER_ONLY
2917 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2918 {
2919     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2920     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2921 
2922     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2923     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2924     s->exit_to_mainloop = true;
2925     return DISAS_TOO_MANY;
2926 }
2927 
2928 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2929 {
2930     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2931     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2932 
2933     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2934     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2935     s->exit_to_mainloop = true;
2936     return DISAS_TOO_MANY;
2937 }
2938 
2939 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2940 {
2941     gen_helper_lra(o->out, cpu_env, o->out, o->in2);
2942     set_cc_static(s);
2943     return DISAS_NEXT;
2944 }
2945 
2946 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2947 {
2948     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2949     return DISAS_NEXT;
2950 }
2951 
2952 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2953 {
2954     TCGv_i64 mask, addr;
2955 
2956     per_breaking_event(s);
2957 
2958     /*
2959      * Convert the short PSW into the normal PSW, similar to what
2960      * s390_cpu_load_normal() does.
2961      */
2962     mask = tcg_temp_new_i64();
2963     addr = tcg_temp_new_i64();
2964     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2965     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2966     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2967     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2968     gen_helper_load_psw(cpu_env, mask, addr);
2969     return DISAS_NORETURN;
2970 }
2971 
2972 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2973 {
2974     TCGv_i64 t1, t2;
2975 
2976     per_breaking_event(s);
2977 
2978     t1 = tcg_temp_new_i64();
2979     t2 = tcg_temp_new_i64();
2980     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2981                         MO_TEUQ | MO_ALIGN_8);
2982     tcg_gen_addi_i64(o->in2, o->in2, 8);
2983     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2984     gen_helper_load_psw(cpu_env, t1, t2);
2985     return DISAS_NORETURN;
2986 }
2987 #endif
2988 
2989 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2990 {
2991     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2992     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2993 
2994     gen_helper_lam(cpu_env, r1, o->in2, r3);
2995     return DISAS_NEXT;
2996 }
2997 
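/* LOAD MULTIPLE (LM, LMY), 32-bit.  */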
2998 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2999 {
3000     int r1 = get_field(s, r1);
3001     int r3 = get_field(s, r3);
3002     TCGv_i64 t1, t2;
3003 
3004     /* Only one register to read. */
3005     t1 = tcg_temp_new_i64();
3006     if (unlikely(r1 == r3)) {
3007         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3008         store_reg32_i64(r1, t1);
3009         return DISAS_NEXT;
3010     }
3011 
3012     /* First load the values of the first and last registers to trigger
3013        possible page faults. */
3014     t2 = tcg_temp_new_i64();
3015     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3016     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3017     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3018     store_reg32_i64(r1, t1);
3019     store_reg32_i64(r3, t2);
3020 
3021     /* Only two registers to read. */
3022     if (((r1 + 1) & 15) == r3) {
3023         return DISAS_NEXT;
3024     }
3025 
3026     /* Then load the remaining registers. Page fault can't occur. */
3027     r3 = (r3 - 1) & 15;
3028     tcg_gen_movi_i64(t2, 4);
3029     while (r1 != r3) {
3030         r1 = (r1 + 1) & 15;
3031         tcg_gen_add_i64(o->in2, o->in2, t2);
3032         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3033         store_reg32_i64(r1, t1);
3034     }
3035     return DISAS_NEXT;
3036 }
3037 
3038 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3039 {
3040     int r1 = get_field(s, r1);
3041     int r3 = get_field(s, r3);
3042     TCGv_i64 t1, t2;
3043 
3044     /* Only one register to read. */
3045     t1 = tcg_temp_new_i64();
3046     if (unlikely(r1 == r3)) {
3047         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3048         store_reg32h_i64(r1, t1);
3049         return DISAS_NEXT;
3050     }
3051 
3052     /* First load the values of the first and last registers to trigger
3053        possible page faults. */
3054     t2 = tcg_temp_new_i64();
3055     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3056     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3057     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3058     store_reg32h_i64(r1, t1);
3059     store_reg32h_i64(r3, t2);
3060 
3061     /* Only two registers to read. */
3062     if (((r1 + 1) & 15) == r3) {
3063         return DISAS_NEXT;
3064     }
3065 
3066     /* Then load the remaining registers. Page fault can't occur. */
3067     r3 = (r3 - 1) & 15;
3068     tcg_gen_movi_i64(t2, 4);
3069     while (r1 != r3) {
3070         r1 = (r1 + 1) & 15;
3071         tcg_gen_add_i64(o->in2, o->in2, t2);
3072         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3073         store_reg32h_i64(r1, t1);
3074     }
3075     return DISAS_NEXT;
3076 }
3077 
3078 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3079 {
3080     int r1 = get_field(s, r1);
3081     int r3 = get_field(s, r3);
3082     TCGv_i64 t1, t2;
3083 
3084     /* Only one register to read. */
3085     if (unlikely(r1 == r3)) {
3086         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3087         return DISAS_NEXT;
3088     }
3089 
3090     /* First load the values of the first and last registers to trigger
3091        possible page faults. */
3092     t1 = tcg_temp_new_i64();
3093     t2 = tcg_temp_new_i64();
3094     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3095     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3096     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3097     tcg_gen_mov_i64(regs[r1], t1);
3098 
3099     /* Only two registers to read. */
3100     if (((r1 + 1) & 15) == r3) {
3101         return DISAS_NEXT;
3102     }
3103 
3104     /* Then load the remaining registers. Page fault can't occur. */
3105     r3 = (r3 - 1) & 15;
3106     tcg_gen_movi_i64(t1, 8);
3107     while (r1 != r3) {
3108         r1 = (r1 + 1) & 15;
3109         tcg_gen_add_i64(o->in2, o->in2, t1);
3110         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3111     }
3112     return DISAS_NEXT;
3113 }
3114 
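/* LOAD PAIR DISJOINT (LPD, LPDG): the two loads must appear
   interlocked, so a parallel context falls back to stop-the-world
   single stepping.  */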
3115 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3116 {
3117     TCGv_i64 a1, a2;
3118     MemOp mop = s->insn->data;
3119 
3120     /* In a parallel context, stop the world and single step.  */
3121     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3122         update_psw_addr(s);
3123         update_cc_op(s);
3124         gen_exception(EXCP_ATOMIC);
3125         return DISAS_NORETURN;
3126     }
3127 
3128     /* In a serial context, perform the two loads ... */
3129     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3130     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3131     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3132     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3133 
3134     /* ... and indicate that we performed them while interlocked.  */
3135     gen_op_movi_cc(s, 0);
3136     return DISAS_NEXT;
3137 }
3138 
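/* LOAD PAIR FROM QUADWORD (LPQ): a single aligned 128-bit load.  */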
3139 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3140 {
3141     o->out_128 = tcg_temp_new_i128();
3142     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3143                          MO_TE | MO_128 | MO_ALIGN);
3144     return DISAS_NEXT;
3145 }
3146 
3147 #ifndef CONFIG_USER_ONLY
3148 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3149 {
3150     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3151     return DISAS_NEXT;
3152 }
3153 #endif
3154 
3155 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3156 {
3157     tcg_gen_andi_i64(o->out, o->in2, -256);
3158     return DISAS_NEXT;
3159 }
3160 
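/* LOAD COUNT TO BLOCK BOUNDARY (LCBB): the number of bytes, capped at
   16, from the address to the next boundary of the 2^(m3 + 6) byte
   block.  */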
3161 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3162 {
3163     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3164 
3165     if (get_field(s, m3) > 6) {
3166         gen_program_exception(s, PGM_SPECIFICATION);
3167         return DISAS_NORETURN;
3168     }
3169 
3170     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3171     tcg_gen_neg_i64(o->addr1, o->addr1);
3172     tcg_gen_movi_i64(o->out, 16);
3173     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3174     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3175     return DISAS_NEXT;
3176 }
3177 
3178 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3179 {
3180     const uint8_t monitor_class = get_field(s, i2);
3181 
3182     if (monitor_class & 0xf0) {
3183         gen_program_exception(s, PGM_SPECIFICATION);
3184         return DISAS_NORETURN;
3185     }
3186 
3187 #if !defined(CONFIG_USER_ONLY)
3188     gen_helper_monitor_call(cpu_env, o->addr1,
3189                             tcg_constant_i32(monitor_class));
3190 #endif
3191     /* Defaults to a NOP. */
3192     return DISAS_NEXT;
3193 }
3194 
3195 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3196 {
3197     o->out = o->in2;
3198     o->in2 = NULL;
3199     return DISAS_NEXT;
3200 }
3201 
3202 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3203 {
3204     int b2 = get_field(s, b2);
3205     TCGv ar1 = tcg_temp_new_i64();
3206 
3207     o->out = o->in2;
3208     o->in2 = NULL;
3209 
3210     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3211     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3212         tcg_gen_movi_i64(ar1, 0);
3213         break;
3214     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3215         tcg_gen_movi_i64(ar1, 1);
3216         break;
3217     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3218         if (b2) {
3219             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3220         } else {
3221             tcg_gen_movi_i64(ar1, 0);
3222         }
3223         break;
3224     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3225         tcg_gen_movi_i64(ar1, 2);
3226         break;
3227     }
3228 
3229     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3230     return DISAS_NEXT;
3231 }
3232 
3233 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3234 {
3235     o->out = o->in1;
3236     o->out2 = o->in2;
3237     o->in1 = NULL;
3238     o->in2 = NULL;
3239     return DISAS_NEXT;
3240 }
3241 
3242 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3243 {
3244     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3245 
3246     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3247     return DISAS_NEXT;
3248 }
3249 
3250 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3251 {
3252     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3253     return DISAS_NEXT;
3254 }
3255 
3256 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3257 {
3258     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3259 
3260     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3261     return DISAS_NEXT;
3262 }
3263 
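/* MOVE LONG (MVCL): the even/odd register pairs hold the destination
   and source address/length; the helper performs the (possibly partial)
   move and sets CC.  */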
3264 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3265 {
3266     int r1 = get_field(s, r1);
3267     int r2 = get_field(s, r2);
3268     TCGv_i32 t1, t2;
3269 
3270     /* r1 and r2 must be even.  */
3271     if (r1 & 1 || r2 & 1) {
3272         gen_program_exception(s, PGM_SPECIFICATION);
3273         return DISAS_NORETURN;
3274     }
3275 
3276     t1 = tcg_constant_i32(r1);
3277     t2 = tcg_constant_i32(r2);
3278     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3279     set_cc_static(s);
3280     return DISAS_NEXT;
3281 }
3282 
3283 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3284 {
3285     int r1 = get_field(s, r1);
3286     int r3 = get_field(s, r3);
3287     TCGv_i32 t1, t3;
3288 
3289     /* r1 and r3 must be even.  */
3290     if (r1 & 1 || r3 & 1) {
3291         gen_program_exception(s, PGM_SPECIFICATION);
3292         return DISAS_NORETURN;
3293     }
3294 
3295     t1 = tcg_constant_i32(r1);
3296     t3 = tcg_constant_i32(r3);
3297     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3298     set_cc_static(s);
3299     return DISAS_NEXT;
3300 }
3301 
3302 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3303 {
3304     int r1 = get_field(s, r1);
3305     int r3 = get_field(s, r3);
3306     TCGv_i32 t1, t3;
3307 
3308     /* r1 and r3 must be even.  */
3309     if (r1 & 1 || r3 & 1) {
3310         gen_program_exception(s, PGM_SPECIFICATION);
3311         return DISAS_NORETURN;
3312     }
3313 
3314     t1 = tcg_constant_i32(r1);
3315     t3 = tcg_constant_i32(r3);
3316     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3317     set_cc_static(s);
3318     return DISAS_NEXT;
3319 }
3320 
3321 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3322 {
3323     int r3 = get_field(s, r3);
3324     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3325     set_cc_static(s);
3326     return DISAS_NEXT;
3327 }
3328 
3329 #ifndef CONFIG_USER_ONLY
3330 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3331 {
3332     int r1 = get_field(s, l1);
3333     int r3 = get_field(s, r3);
3334     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3335     set_cc_static(s);
3336     return DISAS_NEXT;
3337 }
3338 
3339 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3340 {
3341     int r1 = get_field(s, l1);
3342     int r3 = get_field(s, r3);
3343     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3344     set_cc_static(s);
3345     return DISAS_NEXT;
3346 }
3347 #endif
3348 
3349 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3350 {
3351     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3352 
3353     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3354     return DISAS_NEXT;
3355 }
3356 
3357 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3358 {
3359     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3360 
3361     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3362     return DISAS_NEXT;
3363 }
3364 
3365 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3366 {
3367     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3368     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3369 
3370     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3371     set_cc_static(s);
3372     return DISAS_NEXT;
3373 }
3374 
3375 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3376 {
3377     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3378     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3379 
3380     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3381     set_cc_static(s);
3382     return DISAS_NEXT;
3383 }
3384 
3385 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3386 {
3387     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3388 
3389     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3390     return DISAS_NEXT;
3391 }
3392 
3393 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3394 {
3395     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3396     return DISAS_NEXT;
3397 }
3398 
3399 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3400 {
3401     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3402     return DISAS_NEXT;
3403 }
3404 
3405 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3406 {
3407     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3408     return DISAS_NEXT;
3409 }
3410 
3411 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3412 {
3413     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3414     return DISAS_NEXT;
3415 }
3416 
3417 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3418 {
3419     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3420     return DISAS_NEXT;
3421 }
3422 
3423 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3424 {
3425     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3426     return DISAS_NEXT;
3427 }
3428 
3429 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3430 {
3431     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3432     return DISAS_NEXT;
3433 }
3434 
3435 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3436 {
3437     gen_helper_mxdb(o->out_128, cpu_env, o->in1, o->in2);
3438     return DISAS_NEXT;
3439 }
3440 
3441 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3442 {
3443     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3444     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3445     return DISAS_NEXT;
3446 }
3447 
3448 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3449 {
3450     TCGv_i64 r3 = load_freg(get_field(s, r3));
3451     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3452     return DISAS_NEXT;
3453 }
3454 
3455 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3456 {
3457     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3458     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3459     return DISAS_NEXT;
3460 }
3461 
3462 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3463 {
3464     TCGv_i64 r3 = load_freg(get_field(s, r3));
3465     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3466     return DISAS_NEXT;
3467 }
3468 
3469 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3470 {
3471     TCGv_i64 z = tcg_constant_i64(0);
3472     TCGv_i64 n = tcg_temp_new_i64();
3473 
3474     tcg_gen_neg_i64(n, o->in2);
3475     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3476     return DISAS_NEXT;
3477 }
3478 
3479 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3480 {
3481     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3482     return DISAS_NEXT;
3483 }
3484 
3485 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3486 {
3487     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3488     return DISAS_NEXT;
3489 }
3490 
3491 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3492 {
3493     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3494     tcg_gen_mov_i64(o->out2, o->in2);
3495     return DISAS_NEXT;
3496 }
3497 
3498 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3499 {
3500     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3501 
3502     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3503     set_cc_static(s);
3504     return DISAS_NEXT;
3505 }
3506 
3507 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3508 {
3509     tcg_gen_neg_i64(o->out, o->in2);
3510     return DISAS_NEXT;
3511 }
3512 
3513 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3514 {
3515     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3516     return DISAS_NEXT;
3517 }
3518 
3519 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3520 {
3521     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3522     return DISAS_NEXT;
3523 }
3524 
3525 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3526 {
3527     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3528     tcg_gen_mov_i64(o->out2, o->in2);
3529     return DISAS_NEXT;
3530 }
3531 
3532 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3533 {
3534     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3535 
3536     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3537     set_cc_static(s);
3538     return DISAS_NEXT;
3539 }
3540 
3541 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3542 {
3543     tcg_gen_or_i64(o->out, o->in1, o->in2);
3544     return DISAS_NEXT;
3545 }
3546 
3547 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3548 {
3549     int shift = s->insn->data & 0xff;
3550     int size = s->insn->data >> 8;
3551     uint64_t mask = ((1ull << size) - 1) << shift;
3552     TCGv_i64 t = tcg_temp_new_i64();
3553 
3554     tcg_gen_shli_i64(t, o->in2, shift);
3555     tcg_gen_or_i64(o->out, o->in1, t);
3556 
3557     /* Produce the CC from only the bits manipulated.  */
3558     tcg_gen_andi_i64(cc_dst, o->out, mask);
3559     set_cc_nz_u64(s, cc_dst);
3560     return DISAS_NEXT;
3561 }
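
/*
 * A note on the insn->data encoding used above (a sketch derived from
 * the mask computation): the low byte is the shift and the next byte
 * the field size.  An OR of a 16-bit immediate into the leftmost
 * halfword (OIHH) would thus use size 16 and shift 48, i.e. data
 * 0x1030 and mask 0xffff000000000000; see insn-data.def for the
 * actual per-insn values.
 */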
3562 
3563 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3564 {
3565     o->in1 = tcg_temp_new_i64();
3566 
3567     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3568         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3569     } else {
3570         /* Perform the atomic operation in memory. */
3571         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3572                                     s->insn->data);
3573     }
3574 
3575     /* Recompute for the atomic case as well: needed for setting the CC. */
3576     tcg_gen_or_i64(o->out, o->in1, o->in2);
3577 
3578     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3579         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3580     }
3581     return DISAS_NEXT;
3582 }
3583 
3584 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3585 {
3586     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3587 
3588     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3589     return DISAS_NEXT;
3590 }
3591 
3592 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3593 {
3594     int l2 = get_field(s, l2) + 1;
3595     TCGv_i32 l;
3596 
3597     /* The length must not exceed 32 bytes.  */
3598     if (l2 > 32) {
3599         gen_program_exception(s, PGM_SPECIFICATION);
3600         return DISAS_NORETURN;
3601     }
3602     l = tcg_constant_i32(l2);
3603     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3604     return DISAS_NEXT;
3605 }
3606 
3607 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3608 {
3609     int l2 = get_field(s, l2) + 1;
3610     TCGv_i32 l;
3611 
3612     /* The length must be even and must not exceed 64 bytes.  */
3613     if ((l2 & 1) || (l2 > 64)) {
3614         gen_program_exception(s, PGM_SPECIFICATION);
3615         return DISAS_NORETURN;
3616     }
3617     l = tcg_constant_i32(l2);
3618     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3619     return DISAS_NEXT;
3620 }
3621 
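/*
 * POPCNT: with the leftmost m3 bit clear (or without the
 * miscellaneous-instruction-extensions facility 3), the result holds a
 * separate population count in each byte, which is what the helper
 * computes; with the bit set it is a single 64-bit population count.
 */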
3622 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3623 {
3624     const uint8_t m3 = get_field(s, m3);
3625 
3626     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3627         tcg_gen_ctpop_i64(o->out, o->in2);
3628     } else {
3629         gen_helper_popcnt(o->out, o->in2);
3630     }
3631     return DISAS_NEXT;
3632 }
3633 
3634 #ifndef CONFIG_USER_ONLY
3635 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3636 {
3637     gen_helper_ptlb(cpu_env);
3638     return DISAS_NEXT;
3639 }
3640 #endif
3641 
3642 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3643 {
3644     int i3 = get_field(s, i3);
3645     int i4 = get_field(s, i4);
3646     int i5 = get_field(s, i5);
3647     int do_zero = i4 & 0x80;
3648     uint64_t mask, imask, pmask;
3649     int pos, len, rot;
3650 
3651     /* Adjust the arguments for the specific insn.  */
3652     switch (s->fields.op2) {
3653     case 0x55: /* risbg */
3654     case 0x59: /* risbgn */
3655         i3 &= 63;
3656         i4 &= 63;
3657         pmask = ~0;
3658         break;
3659     case 0x5d: /* risbhg */
3660         i3 &= 31;
3661         i4 &= 31;
3662         pmask = 0xffffffff00000000ull;
3663         break;
3664     case 0x51: /* risblg */
3665         i3 = (i3 & 31) + 32;
3666         i4 = (i4 & 31) + 32;
3667         pmask = 0x00000000ffffffffull;
3668         break;
3669     default:
3670         g_assert_not_reached();
3671     }
3672 
3673     /* MASK is the set of bits to be inserted from R2. */
3674     if (i3 <= i4) {
3675         /* [0...i3---i4...63] */
3676         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3677     } else {
3678         /* [0---i4...i3---63] */
3679         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3680     }
3681     /* For RISBLG/RISBHG, the wrapping is limited to the high/low word. */
3682     mask &= pmask;
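
    /*
     * Worked example (illustrative values): i3 = 40, i4 = 43 gives
     * mask = (-1ull >> 40) & (-1ull << 20) = 0x0000000000f00000,
     * i.e. IBM bits 40-43.  The wrapped case i3 = 62, i4 = 1 gives
     * mask = (-1ull >> 62) | (-1ull << 62) = 0xc000000000000003.
     */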
3683 
3684     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3685        insns, we need to keep the other half of the register.  */
3686     imask = ~mask | ~pmask;
3687     if (do_zero) {
3688         imask = ~pmask;
3689     }
3690 
3691     len = i4 - i3 + 1;
3692     pos = 63 - i4;
3693     rot = i5 & 63;
3694 
3695     /* In some cases we can implement this with extract.  */
3696     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3697         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3698         return DISAS_NEXT;
3699     }
3700 
3701     /* In some cases we can implement this with deposit.  */
3702     if (len > 0 && (imask == 0 || ~mask == imask)) {
3703         /* Note that we rotate the bits to be inserted to the lsb, not to
3704            the position as described in the PoO.  */
3705         rot = (rot - pos) & 63;
3706     } else {
3707         pos = -1;
3708     }
3709 
3710     /* Rotate the input as necessary.  */
3711     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3712 
3713     /* Insert the selected bits into the output.  */
3714     if (pos >= 0) {
3715         if (imask == 0) {
3716             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3717         } else {
3718             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3719         }
3720     } else if (imask == 0) {
3721         tcg_gen_andi_i64(o->out, o->in2, mask);
3722     } else {
3723         tcg_gen_andi_i64(o->in2, o->in2, mask);
3724         tcg_gen_andi_i64(o->out, o->out, imask);
3725         tcg_gen_or_i64(o->out, o->out, o->in2);
3726     }
3727     return DISAS_NEXT;
3728 }
3729 
3730 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3731 {
3732     int i3 = get_field(s, i3);
3733     int i4 = get_field(s, i4);
3734     int i5 = get_field(s, i5);
3735     TCGv_i64 orig_out;
3736     uint64_t mask;
3737 
3738     /* If this is a test-only form, arrange to discard the result.  */
3739     if (i3 & 0x80) {
3740         tcg_debug_assert(o->out != NULL);
3741         orig_out = o->out;
3742         o->out = tcg_temp_new_i64();
3743         tcg_gen_mov_i64(o->out, orig_out);
3744     }
3745 
3746     i3 &= 63;
3747     i4 &= 63;
3748     i5 &= 63;
3749 
3750     /* MASK is the set of bits to be operated on from R2.
3751        Take care for I3/I4 wraparound.  */
3752     mask = ~0ull >> i3;
3753     if (i3 <= i4) {
3754         mask ^= ~0ull >> i4 >> 1;
3755     } else {
3756         mask |= ~(~0ull >> i4 >> 1);
3757     }
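
    /*
     * E.g. (illustrative values) i3 = 48, i4 = 55 yields
     * mask = (~0ull >> 48) ^ (~0ull >> 56) = 0x000000000000ff00,
     * i.e. IBM bits 48-55 of the rotated second operand.
     */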
3758 
3759     /* Rotate the input as necessary.  */
3760     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3761 
3762     /* Operate.  */
3763     switch (s->fields.op2) {
3764     case 0x54: /* AND */
3765         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3766         tcg_gen_and_i64(o->out, o->out, o->in2);
3767         break;
3768     case 0x56: /* OR */
3769         tcg_gen_andi_i64(o->in2, o->in2, mask);
3770         tcg_gen_or_i64(o->out, o->out, o->in2);
3771         break;
3772     case 0x57: /* XOR */
3773         tcg_gen_andi_i64(o->in2, o->in2, mask);
3774         tcg_gen_xor_i64(o->out, o->out, o->in2);
3775         break;
3776     default:
3777         abort();
3778     }
3779 
3780     /* Set the CC.  */
3781     tcg_gen_andi_i64(cc_dst, o->out, mask);
3782     set_cc_nz_u64(s, cc_dst);
3783     return DISAS_NEXT;
3784 }
3785 
3786 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3787 {
3788     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3789     return DISAS_NEXT;
3790 }
3791 
3792 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3793 {
3794     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3795     return DISAS_NEXT;
3796 }
3797 
3798 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3799 {
3800     tcg_gen_bswap64_i64(o->out, o->in2);
3801     return DISAS_NEXT;
3802 }
3803 
3804 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3805 {
3806     TCGv_i32 t1 = tcg_temp_new_i32();
3807     TCGv_i32 t2 = tcg_temp_new_i32();
3808     TCGv_i32 to = tcg_temp_new_i32();
3809     tcg_gen_extrl_i64_i32(t1, o->in1);
3810     tcg_gen_extrl_i64_i32(t2, o->in2);
3811     tcg_gen_rotl_i32(to, t1, t2);
3812     tcg_gen_extu_i32_i64(o->out, to);
3813     return DISAS_NEXT;
3814 }
3815 
3816 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3817 {
3818     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3819     return DISAS_NEXT;
3820 }
3821 
3822 #ifndef CONFIG_USER_ONLY
3823 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3824 {
3825     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3826     set_cc_static(s);
3827     return DISAS_NEXT;
3828 }
3829 
3830 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3831 {
3832     gen_helper_sacf(cpu_env, o->in2);
3833     /* Addressing mode has changed, so end the block.  */
3834     return DISAS_TOO_MANY;
3835 }
3836 #endif
3837 
3838 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3839 {
3840     int sam = s->insn->data;
3841     TCGv_i64 tsam;
3842     uint64_t mask;
3843 
3844     switch (sam) {
3845     case 0:
3846         mask = 0xffffff;
3847         break;
3848     case 1:
3849         mask = 0x7fffffff;
3850         break;
3851     default:
3852         mask = -1;
3853         break;
3854     }
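
    /*
     * That is: sam 0 selects the 24-bit mode, sam 1 the 31-bit mode and
     * sam 3 the 64-bit mode; the two-bit value is deposited below into
     * the PSW EA/BA addressing-mode bits (PSW bits 31 and 32).
     */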
3855 
3856     /* Bizarre but true: we check the address of the current insn for the
3857        specification exception, not the next to be executed.  Thus the PoO
3858        documents that Bad Things Happen two bytes before the end.  */
3859     if (s->base.pc_next & ~mask) {
3860         gen_program_exception(s, PGM_SPECIFICATION);
3861         return DISAS_NORETURN;
3862     }
3863     s->pc_tmp &= mask;
3864 
3865     tsam = tcg_constant_i64(sam);
3866     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3867 
3868     /* Always exit the TB, since we (may have) changed execution mode.  */
3869     return DISAS_TOO_MANY;
3870 }
3871 
3872 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3873 {
3874     int r1 = get_field(s, r1);
3875     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3876     return DISAS_NEXT;
3877 }
3878 
3879 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3880 {
3881     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3882     return DISAS_NEXT;
3883 }
3884 
3885 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3886 {
3887     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3888     return DISAS_NEXT;
3889 }
3890 
3891 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3892 {
3893     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3894     return DISAS_NEXT;
3895 }
3896 
3897 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3898 {
3899     gen_helper_sqeb(o->out, cpu_env, o->in2);
3900     return DISAS_NEXT;
3901 }
3902 
3903 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3904 {
3905     gen_helper_sqdb(o->out, cpu_env, o->in2);
3906     return DISAS_NEXT;
3907 }
3908 
3909 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3910 {
3911     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3912     return DISAS_NEXT;
3913 }
3914 
3915 #ifndef CONFIG_USER_ONLY
3916 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3917 {
3918     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3919     set_cc_static(s);
3920     return DISAS_NEXT;
3921 }
3922 
3923 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3924 {
3925     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3926     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3927 
3928     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3929     set_cc_static(s);
3930     return DISAS_NEXT;
3931 }
3932 #endif
3933 
3934 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3935 {
3936     DisasCompare c;
3937     TCGv_i64 a, h;
3938     TCGLabel *lab;
3939     int r1;
3940 
3941     disas_jcc(s, &c, get_field(s, m3));
3942 
3943     /* We want to store when the condition is fulfilled, so branch
3944        out when it's not.  */
3945     c.cond = tcg_invert_cond(c.cond);
3946 
3947     lab = gen_new_label();
3948     if (c.is_64) {
3949         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3950     } else {
3951         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3952     }
3953 
3954     r1 = get_field(s, r1);
3955     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3956     switch (s->insn->data) {
3957     case 1: /* STOCG */
3958         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3959         break;
3960     case 0: /* STOC */
3961         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3962         break;
3963     case 2: /* STOCFH */
3964         h = tcg_temp_new_i64();
3965         tcg_gen_shri_i64(h, regs[r1], 32);
3966         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3967         break;
3968     default:
3969         g_assert_not_reached();
3970     }
3971 
3972     gen_set_label(lab);
3973     return DISAS_NEXT;
3974 }
3975 
3976 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3977 {
3978     TCGv_i64 t;
3979     uint64_t sign = 1ull << s->insn->data;
3980     if (s->insn->data == 31) {
3981         t = tcg_temp_new_i64();
3982         tcg_gen_shli_i64(t, o->in1, 32);
3983     } else {
3984         t = o->in1;
3985     }
3986     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3987     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3988     /* The arithmetic left shift is curious in that it does not affect
3989        the sign bit.  Copy that over from the source unchanged.  */
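    /* E.g. (illustrative, 32-bit case): 0x90000000 shifted left by 1
       yields 0xa0000000 -- bit 31 is preserved while the numeric bits
       shift beneath it.  */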
3990     tcg_gen_andi_i64(o->out, o->out, ~sign);
3991     tcg_gen_andi_i64(o->in1, o->in1, sign);
3992     tcg_gen_or_i64(o->out, o->out, o->in1);
3993     return DISAS_NEXT;
3994 }
3995 
3996 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3997 {
3998     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3999     return DISAS_NEXT;
4000 }
4001 
4002 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4003 {
4004     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4005     return DISAS_NEXT;
4006 }
4007 
4008 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4009 {
4010     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4011     return DISAS_NEXT;
4012 }
4013 
4014 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4015 {
4016     gen_helper_sfpc(cpu_env, o->in2);
4017     return DISAS_NEXT;
4018 }
4019 
4020 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4021 {
4022     gen_helper_sfas(cpu_env, o->in2);
4023     return DISAS_NEXT;
4024 }
4025 
4026 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4027 {
4028     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4029     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4030     gen_helper_srnm(cpu_env, o->addr1);
4031     return DISAS_NEXT;
4032 }
4033 
4034 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4035 {
4036     /* Bits 0-55 are ignored. */
4037     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4038     gen_helper_srnmb(cpu_env, o->addr1);
4039     return DISAS_NEXT;
4040 }
4041 
4042 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4043 {
4044     TCGv_i64 tmp = tcg_temp_new_i64();
4045 
4046     /* Bits other than 61-63 are ignored. */
4047     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4048 
4049     /* No need to call a helper: we don't implement DFP. */
4050     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4051     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4052     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4053     return DISAS_NEXT;
4054 }
4055 
4056 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4057 {
4058     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4059     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4060     set_cc_static(s);
4061 
4062     tcg_gen_shri_i64(o->in1, o->in1, 24);
4063     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4064     return DISAS_NEXT;
4065 }
4066 
4067 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4068 {
4069     int b1 = get_field(s, b1);
4070     int d1 = get_field(s, d1);
4071     int b2 = get_field(s, b2);
4072     int d2 = get_field(s, d2);
4073     int r3 = get_field(s, r3);
4074     TCGv_i64 tmp = tcg_temp_new_i64();
4075 
4076     /* fetch all operands first */
4077     o->in1 = tcg_temp_new_i64();
4078     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4079     o->in2 = tcg_temp_new_i64();
4080     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4081     o->addr1 = tcg_temp_new_i64();
4082     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4083 
4084     /* load the third operand into r3 before modifying anything */
4085     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4086 
4087     /* subtract CPU timer from first operand and store in GR0 */
4088     gen_helper_stpt(tmp, cpu_env);
4089     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4090 
4091     /* store second operand in GR1 */
4092     tcg_gen_mov_i64(regs[1], o->in2);
4093     return DISAS_NEXT;
4094 }
4095 
4096 #ifndef CONFIG_USER_ONLY
4097 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4098 {
4099     tcg_gen_shri_i64(o->in2, o->in2, 4);
4100     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4101     return DISAS_NEXT;
4102 }
4103 
4104 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4105 {
4106     gen_helper_sske(cpu_env, o->in1, o->in2);
4107     return DISAS_NEXT;
4108 }
4109 
4110 static void gen_check_psw_mask(DisasContext *s)
4111 {
4112     TCGv_i64 reserved = tcg_temp_new_i64();
4113     TCGLabel *ok = gen_new_label();
4114 
4115     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4116     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4117     gen_program_exception(s, PGM_SPECIFICATION);
4118     gen_set_label(ok);
4119 }
4120 
4121 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4122 {
4123     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4124 
4125     gen_check_psw_mask(s);
4126 
4127     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4128     s->exit_to_mainloop = true;
4129     return DISAS_TOO_MANY;
4130 }
4131 
4132 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4133 {
4134     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4135     return DISAS_NEXT;
4136 }
4137 #endif
4138 
4139 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4140 {
4141     gen_helper_stck(o->out, cpu_env);
4142     /* ??? We don't implement clock states.  */
4143     gen_op_movi_cc(s, 0);
4144     return DISAS_NEXT;
4145 }
4146 
4147 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4148 {
4149     TCGv_i64 c1 = tcg_temp_new_i64();
4150     TCGv_i64 c2 = tcg_temp_new_i64();
4151     TCGv_i64 todpr = tcg_temp_new_i64();
4152     gen_helper_stck(c1, cpu_env);
4153     /* 16-bit value stored in a uint32_t (only valid bits set) */
4154     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4155     /* Shift the 64-bit value into its place as a zero-extended
4156        104-bit value.  Note that "bit positions 64-103 are always
4157        non-zero so that they compare differently to STCK"; we set
4158        the least significant bit to 1.  */
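    /* Resulting layout (sketch, bit positions of the 16-byte operand):
       byte 0 is the zero epoch index; bits 8-111 hold the 104-bit
       zero-extended TOD value, whose low bit (position 111) the 0x10000
       below forces to 1; bits 112-127 hold the TOD programmable
       field.  */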
4159     tcg_gen_shli_i64(c2, c1, 56);
4160     tcg_gen_shri_i64(c1, c1, 8);
4161     tcg_gen_ori_i64(c2, c2, 0x10000);
4162     tcg_gen_or_i64(c2, c2, todpr);
4163     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4164     tcg_gen_addi_i64(o->in2, o->in2, 8);
4165     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4166     /* ??? We don't implement clock states.  */
4167     gen_op_movi_cc(s, 0);
4168     return DISAS_NEXT;
4169 }
4170 
4171 #ifndef CONFIG_USER_ONLY
4172 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4173 {
4174     gen_helper_sck(cc_op, cpu_env, o->in2);
4175     set_cc_static(s);
4176     return DISAS_NEXT;
4177 }
4178 
4179 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4180 {
4181     gen_helper_sckc(cpu_env, o->in2);
4182     return DISAS_NEXT;
4183 }
4184 
4185 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4186 {
4187     gen_helper_sckpf(cpu_env, regs[0]);
4188     return DISAS_NEXT;
4189 }
4190 
4191 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4192 {
4193     gen_helper_stckc(o->out, cpu_env);
4194     return DISAS_NEXT;
4195 }
4196 
4197 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4198 {
4199     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4200     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4201 
4202     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4203     return DISAS_NEXT;
4204 }
4205 
4206 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4207 {
4208     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4209     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4210 
4211     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4212     return DISAS_NEXT;
4213 }
4214 
4215 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4216 {
4217     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4218     return DISAS_NEXT;
4219 }
4220 
4221 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4222 {
4223     gen_helper_spt(cpu_env, o->in2);
4224     return DISAS_NEXT;
4225 }
4226 
4227 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4228 {
4229     gen_helper_stfl(cpu_env);
4230     return DISAS_NEXT;
4231 }
4232 
4233 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4234 {
4235     gen_helper_stpt(o->out, cpu_env);
4236     return DISAS_NEXT;
4237 }
4238 
4239 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4240 {
4241     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4242     set_cc_static(s);
4243     return DISAS_NEXT;
4244 }
4245 
4246 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4247 {
4248     gen_helper_spx(cpu_env, o->in2);
4249     return DISAS_NEXT;
4250 }
4251 
4252 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4253 {
4254     gen_helper_xsch(cpu_env, regs[1]);
4255     set_cc_static(s);
4256     return DISAS_NEXT;
4257 }
4258 
4259 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4260 {
4261     gen_helper_csch(cpu_env, regs[1]);
4262     set_cc_static(s);
4263     return DISAS_NEXT;
4264 }
4265 
4266 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4267 {
4268     gen_helper_hsch(cpu_env, regs[1]);
4269     set_cc_static(s);
4270     return DISAS_NEXT;
4271 }
4272 
4273 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4274 {
4275     gen_helper_msch(cpu_env, regs[1], o->in2);
4276     set_cc_static(s);
4277     return DISAS_NEXT;
4278 }
4279 
4280 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4281 {
4282     gen_helper_rchp(cpu_env, regs[1]);
4283     set_cc_static(s);
4284     return DISAS_NEXT;
4285 }
4286 
4287 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4288 {
4289     gen_helper_rsch(cpu_env, regs[1]);
4290     set_cc_static(s);
4291     return DISAS_NEXT;
4292 }
4293 
4294 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4295 {
4296     gen_helper_sal(cpu_env, regs[1]);
4297     return DISAS_NEXT;
4298 }
4299 
4300 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4301 {
4302     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4303     return DISAS_NEXT;
4304 }
4305 
4306 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4307 {
4308     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4309     gen_op_movi_cc(s, 3);
4310     return DISAS_NEXT;
4311 }
4312 
4313 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4314 {
4315     /* The instruction is suppressed if not provided. */
4316     return DISAS_NEXT;
4317 }
4318 
4319 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4320 {
4321     gen_helper_ssch(cpu_env, regs[1], o->in2);
4322     set_cc_static(s);
4323     return DISAS_NEXT;
4324 }
4325 
4326 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4327 {
4328     gen_helper_stsch(cpu_env, regs[1], o->in2);
4329     set_cc_static(s);
4330     return DISAS_NEXT;
4331 }
4332 
4333 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4334 {
4335     gen_helper_stcrw(cpu_env, o->in2);
4336     set_cc_static(s);
4337     return DISAS_NEXT;
4338 }
4339 
4340 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4341 {
4342     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4343     set_cc_static(s);
4344     return DISAS_NEXT;
4345 }
4346 
4347 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4348 {
4349     gen_helper_tsch(cpu_env, regs[1], o->in2);
4350     set_cc_static(s);
4351     return DISAS_NEXT;
4352 }
4353 
4354 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4355 {
4356     gen_helper_chsc(cpu_env, o->in2);
4357     set_cc_static(s);
4358     return DISAS_NEXT;
4359 }
4360 
4361 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4362 {
4363     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4364     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4365     return DISAS_NEXT;
4366 }
4367 
4368 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4369 {
4370     uint64_t i2 = get_field(s, i2);
4371     TCGv_i64 t;
4372 
4373     /* It is important to do what the instruction name says: STORE THEN.
4374        If we let the output hook perform the store, then on a fault and
4375        restart we would have the wrong SYSTEM MASK in place.  */
4376     t = tcg_temp_new_i64();
4377     tcg_gen_shri_i64(t, psw_mask, 56);
4378     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4379 
4380     if (s->fields.op == 0xac) {
4381         tcg_gen_andi_i64(psw_mask, psw_mask,
4382                          (i2 << 56) | 0x00ffffffffffffffull);
4383     } else {
4384         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4385     }
4386 
4387     gen_check_psw_mask(s);
4388 
4389     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4390     s->exit_to_mainloop = true;
4391     return DISAS_TOO_MANY;
4392 }
4393 
4394 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4395 {
4396     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4397 
4398     if (s->base.tb->flags & FLAG_MASK_PER) {
4399         update_psw_addr(s);
4400         gen_helper_per_store_real(cpu_env);
4401     }
4402     return DISAS_NEXT;
4403 }
4404 #endif
4405 
4406 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4407 {
4408     gen_helper_stfle(cc_op, cpu_env, o->in2);
4409     set_cc_static(s);
4410     return DISAS_NEXT;
4411 }
4412 
4413 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4414 {
4415     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4416     return DISAS_NEXT;
4417 }
4418 
4419 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4420 {
4421     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4422     return DISAS_NEXT;
4423 }
4424 
4425 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4426 {
4427     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4428                        MO_TEUL | s->insn->data);
4429     return DISAS_NEXT;
4430 }
4431 
4432 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4433 {
4434     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4435                         MO_TEUQ | s->insn->data);
4436     return DISAS_NEXT;
4437 }
4438 
4439 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4440 {
4441     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4442     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4443 
4444     gen_helper_stam(cpu_env, r1, o->in2, r3);
4445     return DISAS_NEXT;
4446 }
4447 
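/*
 * STCM et al.: m3 is a left-to-right byte mask over one 32-bit half of
 * r1, with insn->data giving the bit offset of that half.  E.g.
 * (illustrative) m3 = 0xa stores bytes 0 and 2 of the word to two
 * consecutive byte locations; the contiguous masks degenerate to a
 * single 8/16/32-bit store below.
 */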
4448 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4449 {
4450     int m3 = get_field(s, m3);
4451     int pos, base = s->insn->data;
4452     TCGv_i64 tmp = tcg_temp_new_i64();
4453 
4454     pos = base + ctz32(m3) * 8;
4455     switch (m3) {
4456     case 0xf:
4457         /* Effectively a 32-bit store.  */
4458         tcg_gen_shri_i64(tmp, o->in1, pos);
4459         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4460         break;
4461 
4462     case 0xc:
4463     case 0x6:
4464     case 0x3:
4465         /* Effectively a 16-bit store.  */
4466         tcg_gen_shri_i64(tmp, o->in1, pos);
4467         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4468         break;
4469 
4470     case 0x8:
4471     case 0x4:
4472     case 0x2:
4473     case 0x1:
4474         /* Effectively an 8-bit store.  */
4475         tcg_gen_shri_i64(tmp, o->in1, pos);
4476         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4477         break;
4478 
4479     default:
4480         /* This is going to be a sequence of shifts and stores.  */
4481         pos = base + 32 - 8;
4482         while (m3) {
4483             if (m3 & 0x8) {
4484                 tcg_gen_shri_i64(tmp, o->in1, pos);
4485                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4486                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4487             }
4488             m3 = (m3 << 1) & 0xf;
4489             pos -= 8;
4490         }
4491         break;
4492     }
4493     return DISAS_NEXT;
4494 }
4495 
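/*
 * STM/STMG: store the registers r1 through r3 to successive locations.
 * The (r1 + 1) & 15 step wraps the register number, so e.g. (sketch)
 * "stmg %r14,%r1,0(%r2)" stores r14, r15, r0 and r1.
 */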
4496 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4497 {
4498     int r1 = get_field(s, r1);
4499     int r3 = get_field(s, r3);
4500     int size = s->insn->data;
4501     TCGv_i64 tsize = tcg_constant_i64(size);
4502 
4503     while (1) {
4504         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4505                             size == 8 ? MO_TEUQ : MO_TEUL);
4506         if (r1 == r3) {
4507             break;
4508         }
4509         tcg_gen_add_i64(o->in2, o->in2, tsize);
4510         r1 = (r1 + 1) & 15;
4511     }
4512 
4513     return DISAS_NEXT;
4514 }
4515 
4516 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4517 {
4518     int r1 = get_field(s, r1);
4519     int r3 = get_field(s, r3);
4520     TCGv_i64 t = tcg_temp_new_i64();
4521     TCGv_i64 t4 = tcg_constant_i64(4);
4522     TCGv_i64 t32 = tcg_constant_i64(32);
4523 
4524     while (1) {
4525         tcg_gen_shl_i64(t, regs[r1], t32);
4526         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4527         if (r1 == r3) {
4528             break;
4529         }
4530         tcg_gen_add_i64(o->in2, o->in2, t4);
4531         r1 = (r1 + 1) & 15;
4532     }
4533     return DISAS_NEXT;
4534 }
4535 
4536 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4537 {
4538     TCGv_i128 t16 = tcg_temp_new_i128();
4539 
4540     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4541     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4542                          MO_TE | MO_128 | MO_ALIGN);
4543     return DISAS_NEXT;
4544 }
4545 
4546 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4547 {
4548     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4549     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4550 
4551     gen_helper_srst(cpu_env, r1, r2);
4552     set_cc_static(s);
4553     return DISAS_NEXT;
4554 }
4555 
4556 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4557 {
4558     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4559     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4560 
4561     gen_helper_srstu(cpu_env, r1, r2);
4562     set_cc_static(s);
4563     return DISAS_NEXT;
4564 }
4565 
4566 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4567 {
4568     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4569     return DISAS_NEXT;
4570 }
4571 
4572 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4573 {
4574     tcg_gen_movi_i64(cc_src, 0);
4575     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4576     return DISAS_NEXT;
4577 }
4578 
4579 /* Compute borrow (0, -1) into cc_src. */
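/*
 * Recall that for SUBTRACT LOGICAL a CC of 2 or 3 means the carry was
 * set (no borrow) while 0 or 1 means it was not (borrow), so the msb
 * of CC is the carry; subtracting 1 then maps carry 1 -> 0 (no borrow)
 * and carry 0 -> -1 (borrow).
 */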
4580 static void compute_borrow(DisasContext *s)
4581 {
4582     switch (s->cc_op) {
4583     case CC_OP_SUBU:
4584         /* The borrow value is already in cc_src (0,-1). */
4585         break;
4586     default:
4587         gen_op_calc_cc(s);
4588         /* fall through */
4589     case CC_OP_STATIC:
4590         /* The carry flag is the msb of CC; compute into cc_src. */
4591         tcg_gen_extu_i32_i64(cc_src, cc_op);
4592         tcg_gen_shri_i64(cc_src, cc_src, 1);
4593         /* fall through */
4594     case CC_OP_ADDU:
4595         /* Convert carry (1,0) to borrow (0,-1). */
4596         tcg_gen_subi_i64(cc_src, cc_src, 1);
4597         break;
4598     }
4599 }
4600 
4601 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4602 {
4603     compute_borrow(s);
4604 
4605     /* Borrow is {0, -1}, so add to subtract. */
4606     tcg_gen_add_i64(o->out, o->in1, cc_src);
4607     tcg_gen_sub_i64(o->out, o->out, o->in2);
4608     return DISAS_NEXT;
4609 }
4610 
4611 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4612 {
4613     compute_borrow(s);
4614 
4615     /*
4616      * Borrow is {0, -1}, so add to subtract; replicate the
4617      * borrow input to produce 128-bit -1 for the addition.
4618      */
4619     TCGv_i64 zero = tcg_constant_i64(0);
4620     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4621     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4622 
4623     return DISAS_NEXT;
4624 }
4625 
4626 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4627 {
4628     TCGv_i32 t;
4629 
4630     update_psw_addr(s);
4631     update_cc_op(s);
4632 
4633     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4634     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4635 
4636     t = tcg_constant_i32(s->ilen);
4637     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4638 
4639     gen_exception(EXCP_SVC);
4640     return DISAS_NORETURN;
4641 }
4642 
4643 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4644 {
4645     int cc = 0;
4646 
4647     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4648     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4649     gen_op_movi_cc(s, cc);
4650     return DISAS_NEXT;
4651 }
4652 
4653 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4654 {
4655     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4656     set_cc_static(s);
4657     return DISAS_NEXT;
4658 }
4659 
4660 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4661 {
4662     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4663     set_cc_static(s);
4664     return DISAS_NEXT;
4665 }
4666 
4667 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4668 {
4669     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4670     set_cc_static(s);
4671     return DISAS_NEXT;
4672 }
4673 
4674 #ifndef CONFIG_USER_ONLY
4675 
4676 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4677 {
4678     gen_helper_testblock(cc_op, cpu_env, o->in2);
4679     set_cc_static(s);
4680     return DISAS_NEXT;
4681 }
4682 
4683 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4684 {
4685     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4686     set_cc_static(s);
4687     return DISAS_NEXT;
4688 }
4689 
4690 #endif
4691 
4692 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4693 {
4694     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4695 
4696     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4697     set_cc_static(s);
4698     return DISAS_NEXT;
4699 }
4700 
4701 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4702 {
4703     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4704 
4705     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4706     set_cc_static(s);
4707     return DISAS_NEXT;
4708 }
4709 
4710 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4711 {
4712     TCGv_i128 pair = tcg_temp_new_i128();
4713 
4714     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4715     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4716     set_cc_static(s);
4717     return DISAS_NEXT;
4718 }
4719 
4720 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4721 {
4722     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4723 
4724     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4725     set_cc_static(s);
4726     return DISAS_NEXT;
4727 }
4728 
4729 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4730 {
4731     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4732 
4733     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4734     set_cc_static(s);
4735     return DISAS_NEXT;
4736 }
4737 
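/*
 * TRANSLATE ONE/TWO TO ONE/TWO: the low two opcode bits encode the
 * source/destination character sizes.  The test character comes from
 * regs[0], truncated to 8 or 16 bits as appropriate; with the
 * ETF2-enhancement m3 bit set no test character is used, which
 * tst = -1 signals to the helper.
 */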
4738 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4739 {
4740     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4741     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4742     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4743     TCGv_i32 tst = tcg_temp_new_i32();
4744     int m3 = get_field(s, m3);
4745 
4746     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4747         m3 = 0;
4748     }
4749     if (m3 & 1) {
4750         tcg_gen_movi_i32(tst, -1);
4751     } else {
4752         tcg_gen_extrl_i64_i32(tst, regs[0]);
4753         if (s->insn->opc & 3) {
4754             tcg_gen_ext8u_i32(tst, tst);
4755         } else {
4756             tcg_gen_ext16u_i32(tst, tst);
4757         }
4758     }
4759     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4760 
4761     set_cc_static(s);
4762     return DISAS_NEXT;
4763 }
4764 
4765 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4766 {
4767     TCGv_i32 t1 = tcg_constant_i32(0xff);
4768 
4769     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4770     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4771     set_cc_static(s);
4772     return DISAS_NEXT;
4773 }
4774 
4775 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4776 {
4777     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4778 
4779     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4780     return DISAS_NEXT;
4781 }
4782 
4783 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4784 {
4785     int l1 = get_field(s, l1) + 1;
4786     TCGv_i32 l;
4787 
4788     /* The length must not exceed 32 bytes.  */
4789     if (l1 > 32) {
4790         gen_program_exception(s, PGM_SPECIFICATION);
4791         return DISAS_NORETURN;
4792     }
4793     l = tcg_constant_i32(l1);
4794     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4795     set_cc_static(s);
4796     return DISAS_NEXT;
4797 }
4798 
4799 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4800 {
4801     int l1 = get_field(s, l1) + 1;
4802     TCGv_i32 l;
4803 
4804     /* The length must be even and must not exceed 64 bytes.  */
4805     if ((l1 & 1) || (l1 > 64)) {
4806         gen_program_exception(s, PGM_SPECIFICATION);
4807         return DISAS_NORETURN;
4808     }
4809     l = tcg_constant_i32(l1);
4810     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4811     set_cc_static(s);
4812     return DISAS_NEXT;
4813 }
4814 
4815 
4816 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4817 {
4818     int d1 = get_field(s, d1);
4819     int d2 = get_field(s, d2);
4820     int b1 = get_field(s, b1);
4821     int b2 = get_field(s, b2);
4822     int l = get_field(s, l1);
4823     TCGv_i32 t32;
4824 
4825     o->addr1 = get_address(s, 0, b1, d1);
4826 
4827     /* If the addresses are identical, this is a store/memset of zero.  */
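    /* E.g. "xc 0(8,%r1),0(%r1)" is the classic idiom for clearing
       storage; for lengths up to 32 bytes we emit direct stores of
       zero in descending chunk sizes rather than calling the
       helper.  */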
4828     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4829         o->in2 = tcg_constant_i64(0);
4830 
4831         l++;
4832         while (l >= 8) {
4833             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4834             l -= 8;
4835             if (l > 0) {
4836                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4837             }
4838         }
4839         if (l >= 4) {
4840             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4841             l -= 4;
4842             if (l > 0) {
4843                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4844             }
4845         }
4846         if (l >= 2) {
4847             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4848             l -= 2;
4849             if (l > 0) {
4850                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4851             }
4852         }
4853         if (l) {
4854             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4855         }
4856         gen_op_movi_cc(s, 0);
4857         return DISAS_NEXT;
4858     }
4859 
4860     /* But in general we'll defer to a helper.  */
4861     o->in2 = get_address(s, 0, b2, d2);
4862     t32 = tcg_constant_i32(l);
4863     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4864     set_cc_static(s);
4865     return DISAS_NEXT;
4866 }
4867 
4868 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4869 {
4870     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4871     return DISAS_NEXT;
4872 }
4873 
4874 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4875 {
4876     int shift = s->insn->data & 0xff;
4877     int size = s->insn->data >> 8;
4878     uint64_t mask = ((1ull << size) - 1) << shift;
4879     TCGv_i64 t = tcg_temp_new_i64();
4880 
4881     tcg_gen_shli_i64(t, o->in2, shift);
4882     tcg_gen_xor_i64(o->out, o->in1, t);
4883 
4884     /* Produce the CC from only the bits manipulated.  */
4885     tcg_gen_andi_i64(cc_dst, o->out, mask);
4886     set_cc_nz_u64(s, cc_dst);
4887     return DISAS_NEXT;
4888 }
4889 
4890 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4891 {
4892     o->in1 = tcg_temp_new_i64();
4893 
4894     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4895         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4896     } else {
4897         /* Perform the atomic operation in memory. */
4898         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4899                                      s->insn->data);
4900     }
4901 
4902     /* Recompute for the atomic case as well: needed for setting the CC. */
4903     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4904 
4905     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4906         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4907     }
4908     return DISAS_NEXT;
4909 }
4910 
4911 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4912 {
4913     o->out = tcg_constant_i64(0);
4914     return DISAS_NEXT;
4915 }
4916 
4917 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4918 {
4919     o->out = tcg_constant_i64(0);
4920     o->out2 = o->out;
4921     return DISAS_NEXT;
4922 }
4923 
4924 #ifndef CONFIG_USER_ONLY
4925 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4926 {
4927     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4928 
4929     gen_helper_clp(cpu_env, r2);
4930     set_cc_static(s);
4931     return DISAS_NEXT;
4932 }
4933 
4934 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4935 {
4936     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4937     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4938 
4939     gen_helper_pcilg(cpu_env, r1, r2);
4940     set_cc_static(s);
4941     return DISAS_NEXT;
4942 }
4943 
4944 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4945 {
4946     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4947     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4948 
4949     gen_helper_pcistg(cpu_env, r1, r2);
4950     set_cc_static(s);
4951     return DISAS_NEXT;
4952 }
4953 
4954 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4955 {
4956     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4957     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4958 
4959     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4960     set_cc_static(s);
4961     return DISAS_NEXT;
4962 }
4963 
4964 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4965 {
4966     gen_helper_sic(cpu_env, o->in1, o->in2);
4967     return DISAS_NEXT;
4968 }
4969 
4970 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4971 {
4972     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4973     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4974 
4975     gen_helper_rpcit(cpu_env, r1, r2);
4976     set_cc_static(s);
4977     return DISAS_NEXT;
4978 }
4979 
4980 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4981 {
4982     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4983     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4984     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4985 
4986     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4987     set_cc_static(s);
4988     return DISAS_NEXT;
4989 }
4990 
4991 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4992 {
4993     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4994     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4995 
4996     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4997     set_cc_static(s);
4998     return DISAS_NEXT;
4999 }
5000 #endif
5001 
5002 #include "translate_vx.c.inc"
5003 
5004 /* ====================================================================== */
5005 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5006    the original inputs), update the various cc data structures in order to
5007    be able to compute the new condition code.  */
5008 
5009 static void cout_abs32(DisasContext *s, DisasOps *o)
5010 {
5011     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5012 }
5013 
5014 static void cout_abs64(DisasContext *s, DisasOps *o)
5015 {
5016     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5017 }
5018 
5019 static void cout_adds32(DisasContext *s, DisasOps *o)
5020 {
5021     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5022 }
5023 
5024 static void cout_adds64(DisasContext *s, DisasOps *o)
5025 {
5026     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5027 }
5028 
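/*
 * The 32-bit inputs were zero-extended and added as 64-bit values, so
 * e.g. (illustrative) 0xffffffff + 1 yields 0x100000000: bit 32 is the
 * carry out of the 32-bit addition and the low 32 bits are the result.
 */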
5029 static void cout_addu32(DisasContext *s, DisasOps *o)
5030 {
5031     tcg_gen_shri_i64(cc_src, o->out, 32);
5032     tcg_gen_ext32u_i64(cc_dst, o->out);
5033     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5034 }
5035 
5036 static void cout_addu64(DisasContext *s, DisasOps *o)
5037 {
5038     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5039 }
5040 
5041 static void cout_cmps32(DisasContext *s, DisasOps *o)
5042 {
5043     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5044 }
5045 
5046 static void cout_cmps64(DisasContext *s, DisasOps *o)
5047 {
5048     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5049 }
5050 
5051 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5052 {
5053     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5054 }
5055 
5056 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5057 {
5058     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5059 }
5060 
5061 static void cout_f32(DisasContext *s, DisasOps *o)
5062 {
5063     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5064 }
5065 
5066 static void cout_f64(DisasContext *s, DisasOps *o)
5067 {
5068     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5069 }
5070 
5071 static void cout_f128(DisasContext *s, DisasOps *o)
5072 {
5073     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5074 }
5075 
5076 static void cout_nabs32(DisasContext *s, DisasOps *o)
5077 {
5078     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5079 }
5080 
5081 static void cout_nabs64(DisasContext *s, DisasOps *o)
5082 {
5083     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5084 }
5085 
5086 static void cout_neg32(DisasContext *s, DisasOps *o)
5087 {
5088     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5089 }
5090 
5091 static void cout_neg64(DisasContext *s, DisasOps *o)
5092 {
5093     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5094 }
5095 
5096 static void cout_nz32(DisasContext *s, DisasOps *o)
5097 {
5098     tcg_gen_ext32u_i64(cc_dst, o->out);
5099     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5100 }
5101 
5102 static void cout_nz64(DisasContext *s, DisasOps *o)
5103 {
5104     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5105 }
5106 
5107 static void cout_s32(DisasContext *s, DisasOps *o)
5108 {
5109     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5110 }
5111 
5112 static void cout_s64(DisasContext *s, DisasOps *o)
5113 {
5114     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5115 }
5116 
5117 static void cout_subs32(DisasContext *s, DisasOps *o)
5118 {
5119     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5120 }
5121 
5122 static void cout_subs64(DisasContext *s, DisasOps *o)
5123 {
5124     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5125 }
5126 
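/*
 * The 32-bit subtraction was performed in 64 bits, so e.g.
 * (illustrative) 0 - 1 yields 0xffffffffffffffff; the arithmetic shift
 * right by 32 leaves -1, the borrow out of the 32-bit subtraction.
 */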
5127 static void cout_subu32(DisasContext *s, DisasOps *o)
5128 {
5129     tcg_gen_sari_i64(cc_src, o->out, 32);
5130     tcg_gen_ext32u_i64(cc_dst, o->out);
5131     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5132 }
5133 
5134 static void cout_subu64(DisasContext *s, DisasOps *o)
5135 {
5136     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5137 }
5138 
5139 static void cout_tm32(DisasContext *s, DisasOps *o)
5140 {
5141     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5142 }
5143 
5144 static void cout_tm64(DisasContext *s, DisasOps *o)
5145 {
5146     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5147 }
5148 
5149 static void cout_muls32(DisasContext *s, DisasOps *o)
5150 {
5151     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5152 }
5153 
5154 static void cout_muls64(DisasContext *s, DisasOps *o)
5155 {
5156     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5157     /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5158 }
5159 
5160 /* ====================================================================== */
5161 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5162    with the TCG register to which we will write.  Used in combination with
5163    the "wout" generators: in some cases we need a new temporary, and in
5164    some cases we can write to a TCG global.  */
5165 
5166 static void prep_new(DisasContext *s, DisasOps *o)
5167 {
5168     o->out = tcg_temp_new_i64();
5169 }
5170 #define SPEC_prep_new 0
5171 
5172 static void prep_new_P(DisasContext *s, DisasOps *o)
5173 {
5174     o->out = tcg_temp_new_i64();
5175     o->out2 = tcg_temp_new_i64();
5176 }
5177 #define SPEC_prep_new_P 0
5178 
5179 static void prep_new_x(DisasContext *s, DisasOps *o)
5180 {
5181     o->out_128 = tcg_temp_new_i128();
5182 }
5183 #define SPEC_prep_new_x 0
5184 
5185 static void prep_r1(DisasContext *s, DisasOps *o)
5186 {
5187     o->out = regs[get_field(s, r1)];
5188 }
5189 #define SPEC_prep_r1 0
5190 
5191 static void prep_r1_P(DisasContext *s, DisasOps *o)
5192 {
5193     int r1 = get_field(s, r1);
5194     o->out = regs[r1];
5195     o->out2 = regs[r1 + 1];
5196 }
5197 #define SPEC_prep_r1_P SPEC_r1_even
5198 
5199 /* ====================================================================== */
5200 /* The "Write OUTput" generators.  These generally perform some non-trivial
5201    copy of data to TCG globals, or to main memory.  The trivial cases are
5202    generally handled by having a "prep" generator install the TCG global
5203    as the destination of the operation.  */
5204 
5205 static void wout_r1(DisasContext *s, DisasOps *o)
5206 {
5207     store_reg(get_field(s, r1), o->out);
5208 }
5209 #define SPEC_wout_r1 0
5210 
5211 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5212 {
5213     store_reg(get_field(s, r1), o->out2);
5214 }
5215 #define SPEC_wout_out2_r1 0
5216 
5217 static void wout_r1_8(DisasContext *s, DisasOps *o)
5218 {
5219     int r1 = get_field(s, r1);
5220     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5221 }
5222 #define SPEC_wout_r1_8 0
5223 
5224 static void wout_r1_16(DisasContext *s, DisasOps *o)
5225 {
5226     int r1 = get_field(s, r1);
5227     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5228 }
5229 #define SPEC_wout_r1_16 0
5230 
5231 static void wout_r1_32(DisasContext *s, DisasOps *o)
5232 {
5233     store_reg32_i64(get_field(s, r1), o->out);
5234 }
5235 #define SPEC_wout_r1_32 0
5236 
5237 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5238 {
5239     store_reg32h_i64(get_field(s, r1), o->out);
5240 }
5241 #define SPEC_wout_r1_32h 0
5242 
5243 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5244 {
5245     int r1 = get_field(s, r1);
5246     store_reg32_i64(r1, o->out);
5247     store_reg32_i64(r1 + 1, o->out2);
5248 }
5249 #define SPEC_wout_r1_P32 SPEC_r1_even
5250 
5251 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5252 {
5253     int r1 = get_field(s, r1);
5254     TCGv_i64 t = tcg_temp_new_i64();
5255     store_reg32_i64(r1 + 1, o->out);
5256     tcg_gen_shri_i64(t, o->out, 32);
5257     store_reg32_i64(r1, t);
5258 }
5259 #define SPEC_wout_r1_D32 SPEC_r1_even
5260 
5261 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5262 {
5263     int r1 = get_field(s, r1);
5264     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5265 }
5266 #define SPEC_wout_r1_D64 SPEC_r1_even
5267 
5268 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5269 {
5270     int r3 = get_field(s, r3);
5271     store_reg32_i64(r3, o->out);
5272     store_reg32_i64(r3 + 1, o->out2);
5273 }
5274 #define SPEC_wout_r3_P32 SPEC_r3_even
5275 
5276 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5277 {
5278     int r3 = get_field(s, r3);
5279     store_reg(r3, o->out);
5280     store_reg(r3 + 1, o->out2);
5281 }
5282 #define SPEC_wout_r3_P64 SPEC_r3_even
5283 
5284 static void wout_e1(DisasContext *s, DisasOps *o)
5285 {
5286     store_freg32_i64(get_field(s, r1), o->out);
5287 }
5288 #define SPEC_wout_e1 0
5289 
5290 static void wout_f1(DisasContext *s, DisasOps *o)
5291 {
5292     store_freg(get_field(s, r1), o->out);
5293 }
5294 #define SPEC_wout_f1 0
5295 
5296 static void wout_x1(DisasContext *s, DisasOps *o)
5297 {
5298     int f1 = get_field(s, r1);
5299 
5300     /* Split out_128 into out+out2 for cout_f128. */
5301     tcg_debug_assert(o->out == NULL);
5302     o->out = tcg_temp_new_i64();
5303     o->out2 = tcg_temp_new_i64();
5304 
5305     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5306     store_freg(f1, o->out);
5307     store_freg(f1 + 2, o->out2);
5308 }
5309 #define SPEC_wout_x1 SPEC_r1_f128
5310 
5311 static void wout_x1_P(DisasContext *s, DisasOps *o)
5312 {
5313     int f1 = get_field(s, r1);
5314     store_freg(f1, o->out);
5315     store_freg(f1 + 2, o->out2);
5316 }
5317 #define SPEC_wout_x1_P SPEC_r1_f128
5318 
5319 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5320 {
5321     if (get_field(s, r1) != get_field(s, r2)) {
5322         store_reg32_i64(get_field(s, r1), o->out);
5323     }
5324 }
5325 #define SPEC_wout_cond_r1r2_32 0
5326 
5327 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5328 {
5329     if (get_field(s, r1) != get_field(s, r2)) {
5330         store_freg32_i64(get_field(s, r1), o->out);
5331     }
5332 }
5333 #define SPEC_wout_cond_e1e2 0
5334 
5335 static void wout_m1_8(DisasContext *s, DisasOps *o)
5336 {
5337     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5338 }
5339 #define SPEC_wout_m1_8 0
5340 
5341 static void wout_m1_16(DisasContext *s, DisasOps *o)
5342 {
5343     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5344 }
5345 #define SPEC_wout_m1_16 0
5346 
5347 #ifndef CONFIG_USER_ONLY
5348 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5349 {
5350     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5351 }
5352 #define SPEC_wout_m1_16a 0
5353 #endif
5354 
5355 static void wout_m1_32(DisasContext *s, DisasOps *o)
5356 {
5357     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5358 }
5359 #define SPEC_wout_m1_32 0
5360 
5361 #ifndef CONFIG_USER_ONLY
5362 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5363 {
5364     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5365 }
5366 #define SPEC_wout_m1_32a 0
5367 #endif
5368 
5369 static void wout_m1_64(DisasContext *s, DisasOps *o)
5370 {
5371     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5372 }
5373 #define SPEC_wout_m1_64 0
5374 
5375 #ifndef CONFIG_USER_ONLY
5376 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5377 {
5378     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5379 }
5380 #define SPEC_wout_m1_64a 0
5381 #endif
5382 
5383 static void wout_m2_32(DisasContext *s, DisasOps *o)
5384 {
5385     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5386 }
5387 #define SPEC_wout_m2_32 0
5388 
5389 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5390 {
5391     store_reg(get_field(s, r1), o->in2);
5392 }
5393 #define SPEC_wout_in2_r1 0
5394 
5395 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5396 {
5397     store_reg32_i64(get_field(s, r1), o->in2);
5398 }
5399 #define SPEC_wout_in2_r1_32 0
5400 
5401 /* ====================================================================== */
5402 /* The "INput 1" generators.  These load the first operand to an insn.  */
5403 
5404 static void in1_r1(DisasContext *s, DisasOps *o)
5405 {
5406     o->in1 = load_reg(get_field(s, r1));
5407 }
5408 #define SPEC_in1_r1 0
5409 
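/* The _o variants alias the architectural register directly instead of
   loading a copy into a temporary. */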
5410 static void in1_r1_o(DisasContext *s, DisasOps *o)
5411 {
5412     o->in1 = regs[get_field(s, r1)];
5413 }
5414 #define SPEC_in1_r1_o 0
5415 
5416 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5417 {
5418     o->in1 = tcg_temp_new_i64();
5419     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5420 }
5421 #define SPEC_in1_r1_32s 0
5422 
5423 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5424 {
5425     o->in1 = tcg_temp_new_i64();
5426     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5427 }
5428 #define SPEC_in1_r1_32u 0
5429 
5430 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5431 {
5432     o->in1 = tcg_temp_new_i64();
5433     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5434 }
5435 #define SPEC_in1_r1_sr32 0
5436 
5437 static void in1_r1p1(DisasContext *s, DisasOps *o)
5438 {
5439     o->in1 = load_reg(get_field(s, r1) + 1);
5440 }
5441 #define SPEC_in1_r1p1 SPEC_r1_even
5442 
5443 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5444 {
5445     o->in1 = regs[get_field(s, r1) + 1];
5446 }
5447 #define SPEC_in1_r1p1_o SPEC_r1_even
5448 
5449 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5450 {
5451     o->in1 = tcg_temp_new_i64();
5452     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5453 }
5454 #define SPEC_in1_r1p1_32s SPEC_r1_even
5455 
5456 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5457 {
5458     o->in1 = tcg_temp_new_i64();
5459     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5460 }
5461 #define SPEC_in1_r1p1_32u SPEC_r1_even
5462 
5463 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5464 {
5465     int r1 = get_field(s, r1);
5466     o->in1 = tcg_temp_new_i64();
5467     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5468 }
5469 #define SPEC_in1_r1_D32 SPEC_r1_even
5470 
5471 static void in1_r2(DisasContext *s, DisasOps *o)
5472 {
5473     o->in1 = load_reg(get_field(s, r2));
5474 }
5475 #define SPEC_in1_r2 0
5476 
5477 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5478 {
5479     o->in1 = tcg_temp_new_i64();
5480     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5481 }
5482 #define SPEC_in1_r2_sr32 0
5483 
5484 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5485 {
5486     o->in1 = tcg_temp_new_i64();
5487     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5488 }
5489 #define SPEC_in1_r2_32u 0
5490 
5491 static void in1_r3(DisasContext *s, DisasOps *o)
5492 {
5493     o->in1 = load_reg(get_field(s, r3));
5494 }
5495 #define SPEC_in1_r3 0
5496 
5497 static void in1_r3_o(DisasContext *s, DisasOps *o)
5498 {
5499     o->in1 = regs[get_field(s, r3)];
5500 }
5501 #define SPEC_in1_r3_o 0
5502 
5503 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5504 {
5505     o->in1 = tcg_temp_new_i64();
5506     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5507 }
5508 #define SPEC_in1_r3_32s 0
5509 
5510 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5511 {
5512     o->in1 = tcg_temp_new_i64();
5513     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5514 }
5515 #define SPEC_in1_r3_32u 0
5516 
5517 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5518 {
5519     int r3 = get_field(s, r3);
5520     o->in1 = tcg_temp_new_i64();
5521     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5522 }
5523 #define SPEC_in1_r3_D32 SPEC_r3_even
5524 
5525 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5526 {
5527     o->in1 = tcg_temp_new_i64();
5528     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5529 }
5530 #define SPEC_in1_r3_sr32 0
5531 
5532 static void in1_e1(DisasContext *s, DisasOps *o)
5533 {
5534     o->in1 = load_freg32_i64(get_field(s, r1));
5535 }
5536 #define SPEC_in1_e1 0
5537 
5538 static void in1_f1(DisasContext *s, DisasOps *o)
5539 {
5540     o->in1 = load_freg(get_field(s, r1));
5541 }
5542 #define SPEC_in1_f1 0
5543 
5544 static void in1_x1(DisasContext *s, DisasOps *o)
5545 {
5546     o->in1_128 = load_freg_128(get_field(s, r1));
5547 }
5548 #define SPEC_in1_x1 SPEC_r1_f128
5549 
5550 /* Load the high double word of an extended (128-bit) format FP number */
5551 static void in1_x2h(DisasContext *s, DisasOps *o)
5552 {
5553     o->in1 = load_freg(get_field(s, r2));
5554 }
5555 #define SPEC_in1_x2h SPEC_r2_f128
5556 
5557 static void in1_f3(DisasContext *s, DisasOps *o)
5558 {
5559     o->in1 = load_freg(get_field(s, r3));
5560 }
5561 #define SPEC_in1_f3 0
5562 
5563 static void in1_la1(DisasContext *s, DisasOps *o)
5564 {
5565     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5566 }
5567 #define SPEC_in1_la1 0
5568 
5569 static void in1_la2(DisasContext *s, DisasOps *o)
5570 {
5571     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5572     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5573 }
5574 #define SPEC_in1_la2 0
5575 
5576 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5577 {
5578     in1_la1(s, o);
5579     o->in1 = tcg_temp_new_i64();
5580     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5581 }
5582 #define SPEC_in1_m1_8u 0
5583 
5584 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5585 {
5586     in1_la1(s, o);
5587     o->in1 = tcg_temp_new_i64();
5588     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5589 }
5590 #define SPEC_in1_m1_16s 0
5591 
5592 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5593 {
5594     in1_la1(s, o);
5595     o->in1 = tcg_temp_new_i64();
5596     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5597 }
5598 #define SPEC_in1_m1_16u 0
5599 
5600 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5601 {
5602     in1_la1(s, o);
5603     o->in1 = tcg_temp_new_i64();
5604     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5605 }
5606 #define SPEC_in1_m1_32s 0
5607 
5608 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5609 {
5610     in1_la1(s, o);
5611     o->in1 = tcg_temp_new_i64();
5612     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5613 }
5614 #define SPEC_in1_m1_32u 0
5615 
5616 static void in1_m1_64(DisasContext *s, DisasOps *o)
5617 {
5618     in1_la1(s, o);
5619     o->in1 = tcg_temp_new_i64();
5620     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5621 }
5622 #define SPEC_in1_m1_64 0
5623 
5624 /* ====================================================================== */
5625 /* The "INput 2" generators.  These load the second operand to an insn.  */
5626 
5627 static void in2_r1_o(DisasContext *s, DisasOps *o)
5628 {
5629     o->in2 = regs[get_field(s, r1)];
5630 }
5631 #define SPEC_in2_r1_o 0
5632 
5633 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5634 {
5635     o->in2 = tcg_temp_new_i64();
5636     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5637 }
5638 #define SPEC_in2_r1_16u 0
5639 
5640 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5641 {
5642     o->in2 = tcg_temp_new_i64();
5643     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5644 }
5645 #define SPEC_in2_r1_32u 0
5646 
5647 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5648 {
5649     int r1 = get_field(s, r1);
5650     o->in2 = tcg_temp_new_i64();
5651     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5652 }
5653 #define SPEC_in2_r1_D32 SPEC_r1_even
5654 
5655 static void in2_r2(DisasContext *s, DisasOps *o)
5656 {
5657     o->in2 = load_reg(get_field(s, r2));
5658 }
5659 #define SPEC_in2_r2 0
5660 
5661 static void in2_r2_o(DisasContext *s, DisasOps *o)
5662 {
5663     o->in2 = regs[get_field(s, r2)];
5664 }
5665 #define SPEC_in2_r2_o 0
5666 
5667 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5668 {
5669     int r2 = get_field(s, r2);
5670     if (r2 != 0) {
5671         o->in2 = load_reg(r2);
5672     }
5673 }
5674 #define SPEC_in2_r2_nz 0
5675 
5676 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5677 {
5678     o->in2 = tcg_temp_new_i64();
5679     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5680 }
5681 #define SPEC_in2_r2_8s 0
5682 
5683 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5684 {
5685     o->in2 = tcg_temp_new_i64();
5686     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5687 }
5688 #define SPEC_in2_r2_8u 0
5689 
5690 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5691 {
5692     o->in2 = tcg_temp_new_i64();
5693     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5694 }
5695 #define SPEC_in2_r2_16s 0
5696 
5697 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5698 {
5699     o->in2 = tcg_temp_new_i64();
5700     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5701 }
5702 #define SPEC_in2_r2_16u 0
5703 
5704 static void in2_r3(DisasContext *s, DisasOps *o)
5705 {
5706     o->in2 = load_reg(get_field(s, r3));
5707 }
5708 #define SPEC_in2_r3 0
5709 
5710 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5711 {
5712     int r3 = get_field(s, r3);
5713     o->in2_128 = tcg_temp_new_i128();
5714     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5715 }
5716 #define SPEC_in2_r3_D64 SPEC_r3_even
5717 
5718 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5719 {
5720     o->in2 = tcg_temp_new_i64();
5721     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5722 }
5723 #define SPEC_in2_r3_sr32 0
5724 
5725 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5726 {
5727     o->in2 = tcg_temp_new_i64();
5728     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5729 }
5730 #define SPEC_in2_r3_32u 0
5731 
5732 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5733 {
5734     o->in2 = tcg_temp_new_i64();
5735     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5736 }
5737 #define SPEC_in2_r2_32s 0
5738 
5739 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5740 {
5741     o->in2 = tcg_temp_new_i64();
5742     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5743 }
5744 #define SPEC_in2_r2_32u 0
5745 
5746 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5747 {
5748     o->in2 = tcg_temp_new_i64();
5749     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5750 }
5751 #define SPEC_in2_r2_sr32 0
5752 
5753 static void in2_e2(DisasContext *s, DisasOps *o)
5754 {
5755     o->in2 = load_freg32_i64(get_field(s, r2));
5756 }
5757 #define SPEC_in2_e2 0
5758 
5759 static void in2_f2(DisasContext *s, DisasOps *o)
5760 {
5761     o->in2 = load_freg(get_field(s, r2));
5762 }
5763 #define SPEC_in2_f2 0
5764 
5765 static void in2_x2(DisasContext *s, DisasOps *o)
5766 {
5767     o->in2_128 = load_freg_128(get_field(s, r2));
5768 }
5769 #define SPEC_in2_x2 SPEC_r2_f128
5770 
5771 /* Load the low double word of an extended (128-bit) format FP number */
5772 static void in2_x2l(DisasContext *s, DisasOps *o)
5773 {
5774     o->in2 = load_freg(get_field(s, r2) + 2);
5775 }
5776 #define SPEC_in2_x2l SPEC_r2_f128
5777 
5778 static void in2_ra2(DisasContext *s, DisasOps *o)
5779 {
5780     int r2 = get_field(s, r2);
5781 
5782     /* Note: *don't* treat !r2 as 0, use the reg value. */
5783     o->in2 = tcg_temp_new_i64();
5784     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5785 }
5786 #define SPEC_in2_ra2 0
5787 
5788 static void in2_ra2_E(DisasContext *s, DisasOps *o)
5789 {
5790     in2_ra2(s, o);
5791 }
5792 #define SPEC_in2_ra2_E SPEC_r2_even
5793 
5794 static void in2_a2(DisasContext *s, DisasOps *o)
5795 {
5796     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5797     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5798 }
5799 #define SPEC_in2_a2 0
5800 
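/*
 * Compute a relative-immediate operand address: the i2 field counts
 * signed halfwords, so for an immediate the result is pc + 2 * i2;
 * otherwise disas_jdest has already supplied a suitable TCG value.
 */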
5801 static TCGv gen_ri2(DisasContext *s)
5802 {
5803     TCGv ri2 = NULL;
5804     bool is_imm;
5805     int imm;
5806 
5807     disas_jdest(s, i2, is_imm, imm, ri2);
5808     if (is_imm) {
5809         ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
5810     }
5811 
5812     return ri2;
5813 }
5814 
5815 static void in2_ri2(DisasContext *s, DisasOps *o)
5816 {
5817     o->in2 = gen_ri2(s);
5818 }
5819 #define SPEC_in2_ri2 0
5820 
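/*
 * Shift-count operand: if no base register is given, the count is taken
 * from the displacement alone; either way only the low six bits of the
 * effective address are used.
 */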
5821 static void in2_sh(DisasContext *s, DisasOps *o)
5822 {
5823     int b2 = get_field(s, b2);
5824     int d2 = get_field(s, d2);
5825 
5826     if (b2 == 0) {
5827         o->in2 = tcg_constant_i64(d2 & 0x3f);
5828     } else {
5829         o->in2 = get_address(s, 0, b2, d2);
5830         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5831     }
5832 }
5833 #define SPEC_in2_sh 0
5834 
5835 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5836 {
5837     in2_a2(s, o);
5838     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5839 }
5840 #define SPEC_in2_m2_8u 0
5841 
5842 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5843 {
5844     in2_a2(s, o);
5845     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5846 }
5847 #define SPEC_in2_m2_16s 0
5848 
5849 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5850 {
5851     in2_a2(s, o);
5852     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5853 }
5854 #define SPEC_in2_m2_16u 0
5855 
5856 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5857 {
5858     in2_a2(s, o);
5859     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5860 }
5861 #define SPEC_in2_m2_32s 0
5862 
5863 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5864 {
5865     in2_a2(s, o);
5866     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5867 }
5868 #define SPEC_in2_m2_32u 0
5869 
5870 #ifndef CONFIG_USER_ONLY
5871 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5872 {
5873     in2_a2(s, o);
5874     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5875 }
5876 #define SPEC_in2_m2_32ua 0
5877 #endif
5878 
5879 static void in2_m2_64(DisasContext *s, DisasOps *o)
5880 {
5881     in2_a2(s, o);
5882     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5883 }
5884 #define SPEC_in2_m2_64 0
5885 
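/* As in2_m2_64, but additionally wrap the loaded value to the current
   addressing mode (gen_addi_and_wrap_i64 with a zero offset). */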
5886 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5887 {
5888     in2_a2(s, o);
5889     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5890     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5891 }
5892 #define SPEC_in2_m2_64w 0
5893 
5894 #ifndef CONFIG_USER_ONLY
5895 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5896 {
5897     in2_a2(s, o);
5898     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5899 }
5900 #define SPEC_in2_m2_64a 0
5901 #endif
5902 
5903 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5904 {
5905     o->in2 = tcg_temp_new_i64();
5906     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5907 }
5908 #define SPEC_in2_mri2_16s 0
5909 
5910 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5911 {
5912     o->in2 = tcg_temp_new_i64();
5913     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5914 }
5915 #define SPEC_in2_mri2_16u 0
5916 
5917 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5918 {
5919     o->in2 = tcg_temp_new_i64();
5920     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5921                        MO_TESL | MO_ALIGN);
5922 }
5923 #define SPEC_in2_mri2_32s 0
5924 
5925 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5926 {
5927     o->in2 = tcg_temp_new_i64();
5928     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5929                        MO_TEUL | MO_ALIGN);
5930 }
5931 #define SPEC_in2_mri2_32u 0
5932 
5933 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5934 {
5935     o->in2 = tcg_temp_new_i64();
5936     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5937                         MO_TEUQ | MO_ALIGN);
5938 }
5939 #define SPEC_in2_mri2_64 0
5940 
5941 static void in2_i2(DisasContext *s, DisasOps *o)
5942 {
5943     o->in2 = tcg_constant_i64(get_field(s, i2));
5944 }
5945 #define SPEC_in2_i2 0
5946 
5947 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5948 {
5949     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5950 }
5951 #define SPEC_in2_i2_8u 0
5952 
5953 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5954 {
5955     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5956 }
5957 #define SPEC_in2_i2_16u 0
5958 
5959 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5960 {
5961     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5962 }
5963 #define SPEC_in2_i2_32u 0
5964 
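/* Immediates shifted into position by a per-insn amount stored in
   insn->data, e.g. to place a 16-bit value in a specific halfword. */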
5965 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5966 {
5967     uint64_t i2 = (uint16_t)get_field(s, i2);
5968     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5969 }
5970 #define SPEC_in2_i2_16u_shl 0
5971 
5972 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5973 {
5974     uint64_t i2 = (uint32_t)get_field(s, i2);
5975     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5976 }
5977 #define SPEC_in2_i2_32u_shl 0
5978 
5979 #ifndef CONFIG_USER_ONLY
5980 static void in2_insn(DisasContext *s, DisasOps *o)
5981 {
5982     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5983 }
5984 #define SPEC_in2_insn 0
5985 #endif
5986 
5987 /* ====================================================================== */
5988 
5989 /* Find opc within the table of insns.  This is formulated as a switch
5990    statement so that (1) we get compile-time notice of cut-paste errors
5991    for duplicated opcodes, and (2) the compiler generates the binary
5992    search tree, rather than us having to post-process the table.  */
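
/*
 * For orientation, the successive definitions of E() below expand each
 * entry of "insn-data.h.inc" in turn into: an enumerator (insn_<NM>),
 * a DisasInsn table initializer, and finally a
 * "case OPC: return &insn_info[insn_<NM>];" arm of the switch in
 * lookup_opc().
 */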
5993 
5994 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5995     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5996 
5997 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5998     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5999 
6000 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
6001     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
6002 
6003 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
6004 
6005 enum DisasInsnEnum {
6006 #include "insn-data.h.inc"
6007 };
6008 
6009 #undef E
6010 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6011     .opc = OPC,                                                             \
6012     .flags = FL,                                                            \
6013     .fmt = FMT_##FT,                                                        \
6014     .fac = FAC_##FC,                                                        \
6015     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6016     .name = #NM,                                                            \
6017     .help_in1 = in1_##I1,                                                   \
6018     .help_in2 = in2_##I2,                                                   \
6019     .help_prep = prep_##P,                                                  \
6020     .help_wout = wout_##W,                                                  \
6021     .help_cout = cout_##CC,                                                 \
6022     .help_op = op_##OP,                                                     \
6023     .data = D                                                               \
6024  },
6025 
6026 /* Allow 0 to be used for NULL in the table below.  */
6027 #define in1_0  NULL
6028 #define in2_0  NULL
6029 #define prep_0  NULL
6030 #define wout_0  NULL
6031 #define cout_0  NULL
6032 #define op_0  NULL
6033 
6034 #define SPEC_in1_0 0
6035 #define SPEC_in2_0 0
6036 #define SPEC_prep_0 0
6037 #define SPEC_wout_0 0
6038 
6039 /* Give smaller names to the various facilities.  */
6040 #define FAC_Z           S390_FEAT_ZARCH
6041 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6042 #define FAC_DFP         S390_FEAT_DFP
6043 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6044 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6045 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6046 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6047 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6048 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6049 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6050 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6051 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6052 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6053 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6054 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6055 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6056 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6057 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6058 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6059 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6060 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6061 #define FAC_SFLE        S390_FEAT_STFLE
6062 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6063 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6064 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6065 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6066 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6067 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6068 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6069 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6070 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6071 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6072 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6073 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6074 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6075 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6076 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6077 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6078 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6079 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6080 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6081 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6082 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6083 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6084 
6085 static const DisasInsn insn_info[] = {
6086 #include "insn-data.h.inc"
6087 };
6088 
6089 #undef E
6090 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6091     case OPC: return &insn_info[insn_ ## NM];
6092 
6093 static const DisasInsn *lookup_opc(uint16_t opc)
6094 {
6095     switch (opc) {
6096 #include "insn-data.h.inc"
6097     default:
6098         return NULL;
6099     }
6100 }
6101 
6102 #undef F
6103 #undef E
6104 #undef D
6105 #undef C
6106 
6107 /* Extract a field from the insn.  The INSN should be left-aligned in
6108    the uint64_t so that we can more easily utilize the big-bit-endian
6109    definitions we extract from the Principles of Operation.  */
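/*
 * For example, a 4-bit field beginning at bit 8 of the image comes out
 * as (insn << 8) >> 60, i.e. instruction bits 8-11.
 */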
6110 
6111 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6112 {
6113     uint32_t r, m;
6114 
6115     if (f->size == 0) {
6116         return;
6117     }
6118 
6119     /* Zero extract the field from the insn.  */
6120     r = (insn << f->beg) >> (64 - f->size);
6121 
6122     /* Sign-extend, or un-swap the field as necessary.  */
6123     switch (f->type) {
6124     case 0: /* unsigned */
6125         break;
6126     case 1: /* signed */
6127         assert(f->size <= 32);
6128         m = 1u << (f->size - 1);
6129         r = (r ^ m) - m;
6130         break;
6131     case 2: /* dl+dh split, signed 20 bit. */
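        /*
         * The field image is DL (12 bits) followed by DH (8 bits), so R
         * currently holds DL:DH.  Sign-extend DH and swap the halves to
         * form the 20-bit displacement DH:DL.
         */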
6132         r = ((int8_t)r << 12) | (r >> 8);
6133         break;
6134     case 3: /* MSB stored in RXB */
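        /*
         * Vector register numbers are 5 bits wide: the low 4 bits sit in
         * the V field itself and the most significant bit is kept in the
         * RXB byte, instruction bits 36-39, one bit per operand position.
         */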
6135         g_assert(f->size == 4);
6136         switch (f->beg) {
6137         case 8:
6138             r |= extract64(insn, 63 - 36, 1) << 4;
6139             break;
6140         case 12:
6141             r |= extract64(insn, 63 - 37, 1) << 4;
6142             break;
6143         case 16:
6144             r |= extract64(insn, 63 - 38, 1) << 4;
6145             break;
6146         case 32:
6147             r |= extract64(insn, 63 - 39, 1) << 4;
6148             break;
6149         default:
6150             g_assert_not_reached();
6151         }
6152         break;
6153     default:
6154         abort();
6155     }
6156 
6157     /*
6158      * Validate that the "compressed" encoding we selected above is valid.
6159      * I.e. we haven't made two different original fields overlap.
6160      */
6161     assert(((o->presentC >> f->indexC) & 1) == 0);
6162     o->presentC |= 1 << f->indexC;
6163     o->presentO |= 1 << f->indexO;
6164 
6165     o->c[f->indexC] = r;
6166 }
6167 
6168 /* Look up the insn at the current PC, extracting the operands into O and
6169    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6170 
6171 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6172 {
6173     uint64_t insn, pc = s->base.pc_next;
6174     int op, op2, ilen;
6175     const DisasInsn *info;
6176 
6177     if (unlikely(s->ex_value)) {
6178         /* Drop the EX data now, so that it's clear on exception paths.  */
6179         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6180                        offsetof(CPUS390XState, ex_value));
6181 
6182         /* Extract the values saved by EXECUTE.  */
6183         insn = s->ex_value & 0xffffffffffff0000ull;
6184         ilen = s->ex_value & 0xf;
6185 
6186         /* Register insn bytes with translator so plugins work. */
6187         for (int i = 0; i < ilen; i++) {
6188             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6189             translator_fake_ldb(byte, pc + i);
6190         }
6191         op = insn >> 56;
6192     } else {
6193         insn = ld_code2(env, s, pc);
6194         op = (insn >> 8) & 0xff;
6195         ilen = get_ilen(op);
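        /*
         * The length is encoded in the two high bits of the first opcode
         * byte: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.
         */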
6196         switch (ilen) {
6197         case 2:
6198             insn = insn << 48;
6199             break;
6200         case 4:
6201             insn = ld_code4(env, s, pc) << 32;
6202             break;
6203         case 6:
6204             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6205             break;
6206         default:
6207             g_assert_not_reached();
6208         }
6209     }
6210     s->pc_tmp = s->base.pc_next + ilen;
6211     s->ilen = ilen;
6212 
6213     /* We can't determine the insn format until we've looked up the full
6214        insn opcode, which in turn requires locating the secondary opcode.
6215        Assume by default that OP2 is at bit 40; for those smaller insns
6216        that don't actually have a secondary opcode this will correctly
6217        result in OP2 = 0. */
6218     switch (op) {
6219     case 0x01: /* E */
6220     case 0x80: /* S */
6221     case 0x82: /* S */
6222     case 0x93: /* S */
6223     case 0xb2: /* S, RRF, RRE, IE */
6224     case 0xb3: /* RRE, RRD, RRF */
6225     case 0xb9: /* RRE, RRF */
6226     case 0xe5: /* SSE, SIL */
6227         op2 = (insn << 8) >> 56;
6228         break;
6229     case 0xa5: /* RI */
6230     case 0xa7: /* RI */
6231     case 0xc0: /* RIL */
6232     case 0xc2: /* RIL */
6233     case 0xc4: /* RIL */
6234     case 0xc6: /* RIL */
6235     case 0xc8: /* SSF */
6236     case 0xcc: /* RIL */
6237         op2 = (insn << 12) >> 60;
6238         break;
6239     case 0xc5: /* MII */
6240     case 0xc7: /* SMI */
6241     case 0xd0 ... 0xdf: /* SS */
6242     case 0xe1: /* SS */
6243     case 0xe2: /* SS */
6244     case 0xe8: /* SS */
6245     case 0xe9: /* SS */
6246     case 0xea: /* SS */
6247     case 0xee ... 0xf3: /* SS */
6248     case 0xf8 ... 0xfd: /* SS */
6249         op2 = 0;
6250         break;
6251     default:
6252         op2 = (insn << 40) >> 56;
6253         break;
6254     }
6255 
6256     memset(&s->fields, 0, sizeof(s->fields));
6257     s->fields.raw_insn = insn;
6258     s->fields.op = op;
6259     s->fields.op2 = op2;
6260 
6261     /* Lookup the instruction.  */
6262     info = lookup_opc(op << 8 | op2);
6263     s->insn = info;
6264 
6265     /* If we found it, extract the operands.  */
6266     if (info != NULL) {
6267         DisasFormat fmt = info->fmt;
6268         int i;
6269 
6270         for (i = 0; i < NUM_C_FIELD; ++i) {
6271             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6272         }
6273     }
6274     return info;
6275 }
6276 
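/* Without the additional-floating-point facility, only FPRs 0, 2, 4
   and 6 are available; everything else is an AFP register. */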
6277 static bool is_afp_reg(int reg)
6278 {
6279     return reg % 2 || reg > 6;
6280 }
6281 
6282 static bool is_fp_pair(int reg)
6283 {
6284     /* Valid pair bases are 0,1,4,5,8,9,12,13, i.e. any reg with bit 1 clear. */
6285     return !(reg & 0x2);
6286 }
6287 
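/*
 * Translate a single instruction: decode it, apply the privilege,
 * facility and specification checks, then run the helper pipeline:
 * load inputs (help_in1/help_in2), prepare outputs (help_prep),
 * perform the operation (help_op), write results back (help_wout)
 * and finally compute the condition code (help_cout).
 */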
6288 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6289 {
6290     const DisasInsn *insn;
6291     DisasJumpType ret = DISAS_NEXT;
6292     DisasOps o = {};
6293     bool icount = false;
6294 
6295     /* Search for the insn in the table.  */
6296     insn = extract_insn(env, s);
6297 
6298     /* Update insn_start now that we know the ILEN.  */
6299     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6300 
6301     /* Not found means unimplemented/illegal opcode.  */
6302     if (insn == NULL) {
6303         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6304                       s->fields.op, s->fields.op2);
6305         gen_illegal_opcode(s);
6306         ret = DISAS_NORETURN;
6307         goto out;
6308     }
6309 
6310 #ifndef CONFIG_USER_ONLY
6311     if (s->base.tb->flags & FLAG_MASK_PER) {
6312         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6313         gen_helper_per_ifetch(cpu_env, addr);
6314     }
6315 #endif
6316 
6317     /* process flags */
6318     if (insn->flags) {
6319         /* privileged instruction */
6320         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6321             gen_program_exception(s, PGM_PRIVILEGED);
6322             ret = DISAS_NORETURN;
6323             goto out;
6324         }
6325 
6326         /* if AFP is not enabled, instructions and registers are forbidden */
6327         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6328             uint8_t dxc = 0;
6329 
6330             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6331                 dxc = 1;
6332             }
6333             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6334                 dxc = 1;
6335             }
6336             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6337                 dxc = 1;
6338             }
6339             if (insn->flags & IF_BFP) {
6340                 dxc = 2;
6341             }
6342             if (insn->flags & IF_DFP) {
6343                 dxc = 3;
6344             }
6345             if (insn->flags & IF_VEC) {
6346                 dxc = 0xfe;
6347             }
6348             if (dxc) {
6349                 gen_data_exception(dxc);
6350                 ret = DISAS_NORETURN;
6351                 goto out;
6352             }
6353         }
6354 
6355         /* if vector instructions not enabled, executing them is forbidden */
6356         if (insn->flags & IF_VEC) {
6357             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6358                 gen_data_exception(0xfe);
6359                 ret = DISAS_NORETURN;
6360                 goto out;
6361             }
6362         }
6363 
6364         /* input/output is the special case for icount mode */
6365         if (unlikely(insn->flags & IF_IO)) {
6366             icount = translator_io_start(&s->base);
6367         }
6368     }
6369 
6370     /* Check for insn specification exceptions.  */
6371     if (insn->spec) {
6372         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6373             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6374             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6375             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6376             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6377             gen_program_exception(s, PGM_SPECIFICATION);
6378             ret = DISAS_NORETURN;
6379             goto out;
6380         }
6381     }
6382 
6383     /* Implement the instruction.  */
6384     if (insn->help_in1) {
6385         insn->help_in1(s, &o);
6386     }
6387     if (insn->help_in2) {
6388         insn->help_in2(s, &o);
6389     }
6390     if (insn->help_prep) {
6391         insn->help_prep(s, &o);
6392     }
6393     if (insn->help_op) {
6394         ret = insn->help_op(s, &o);
6395     }
6396     if (ret != DISAS_NORETURN) {
6397         if (insn->help_wout) {
6398             insn->help_wout(s, &o);
6399         }
6400         if (insn->help_cout) {
6401             insn->help_cout(s, &o);
6402         }
6403     }
6404 
6405     /* An I/O insn should be the last instruction in a TB when icount is enabled. */
6406     if (unlikely(icount && ret == DISAS_NEXT)) {
6407         ret = DISAS_TOO_MANY;
6408     }
6409 
6410 #ifndef CONFIG_USER_ONLY
6411     if (s->base.tb->flags & FLAG_MASK_PER) {
6412         /* An exception might be triggered; save the PSW if not already done.  */
6413         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6414             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6415         }
6416 
6417         /* Call the helper to check for a possible PER exception.  */
6418         gen_helper_per_check_exception(cpu_env);
6419     }
6420 #endif
6421 
6422 out:
6423     /* Advance to the next instruction.  */
6424     s->base.pc_next = s->pc_tmp;
6425     return ret;
6426 }
6427 
6428 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6429 {
6430     DisasContext *dc = container_of(dcbase, DisasContext, base);
6431 
6432     /* 31-bit mode */
6433     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6434         dc->base.pc_first &= 0x7fffffff;
6435         dc->base.pc_next = dc->base.pc_first;
6436     }
6437 
6438     dc->cc_op = CC_OP_DYNAMIC;
6439     dc->ex_value = dc->base.tb->cs_base;
6440     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6441 }
6442 
6443 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6444 {
6445 }
6446 
6447 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6448 {
6449     DisasContext *dc = container_of(dcbase, DisasContext, base);
6450 
6451     /* Delay setting ILEN until we've read the insn. */
6452     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6453     dc->insn_start = tcg_last_op();
6454 }
6455 
6456 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6457                                 uint64_t pc)
6458 {
6459     uint64_t insn = cpu_lduw_code(env, pc);
6460 
6461     return pc + get_ilen((insn >> 8) & 0xff);
6462 }
6463 
6464 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6465 {
6466     CPUS390XState *env = cs->env_ptr;
6467     DisasContext *dc = container_of(dcbase, DisasContext, base);
6468 
6469     dc->base.is_jmp = translate_one(env, dc);
6470     if (dc->base.is_jmp == DISAS_NEXT) {
6471         if (dc->ex_value ||
6472             !is_same_page(dcbase, dc->base.pc_next) ||
6473             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6474             dc->base.is_jmp = DISAS_TOO_MANY;
6475         }
6476     }
6477 }
6478 
6479 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6480 {
6481     DisasContext *dc = container_of(dcbase, DisasContext, base);
6482 
6483     switch (dc->base.is_jmp) {
6484     case DISAS_NORETURN:
6485         break;
6486     case DISAS_TOO_MANY:
6487         update_psw_addr(dc);
6488         /* FALLTHRU */
6489     case DISAS_PC_UPDATED:
6490         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6491            cc op type is in env */
6492         update_cc_op(dc);
6493         /* FALLTHRU */
6494     case DISAS_PC_CC_UPDATED:
6495         /* Exit the TB, either by raising a debug exception or by return.  */
6496         if (dc->exit_to_mainloop) {
6497             tcg_gen_exit_tb(NULL, 0);
6498         } else {
6499             tcg_gen_lookup_and_goto_ptr();
6500         }
6501         break;
6502     default:
6503         g_assert_not_reached();
6504     }
6505 }
6506 
6507 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6508                                CPUState *cs, FILE *logfile)
6509 {
6510     DisasContext *dc = container_of(dcbase, DisasContext, base);
6511 
6512     if (unlikely(dc->ex_value)) {
6513         /* ??? Unfortunately target_disas can't use host memory.  */
6514         fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6515     } else {
6516         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6517         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6518     }
6519 }
6520 
6521 static const TranslatorOps s390x_tr_ops = {
6522     .init_disas_context = s390x_tr_init_disas_context,
6523     .tb_start           = s390x_tr_tb_start,
6524     .insn_start         = s390x_tr_insn_start,
6525     .translate_insn     = s390x_tr_translate_insn,
6526     .tb_stop            = s390x_tr_tb_stop,
6527     .disas_log          = s390x_tr_disas_log,
6528 };
6529 
6530 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6531                            target_ulong pc, void *host_pc)
6532 {
6533     DisasContext dc;
6534 
6535     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6536 }
6537 
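/* DATA mirrors the three arguments of tcg_gen_insn_start() above:
   psw address, cc_op, and (once known) the insn length. */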
6538 void s390x_restore_state_to_opc(CPUState *cs,
6539                                 const TranslationBlock *tb,
6540                                 const uint64_t *data)
6541 {
6542     S390CPU *cpu = S390_CPU(cs);
6543     CPUS390XState *env = &cpu->env;
6544     int cc_op = data[1];
6545 
6546     env->psw.addr = data[0];
6547 
6548     /* Update the CC opcode if it is not already up-to-date.  */
6549     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6550         env->cc_op = cc_op;
6551     }
6552 
6553     /* Record ILEN.  */
6554     env->int_pgm_ilen = data[2];
6555 }
6556