xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 06831001)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
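
/*
 * Illustrative sketch (not part of the decoder itself): an RR-format
 * instruction has r1/r2 but never b1/d1, while an SI-format instruction
 * has b1/d1 but never r1/r2, so the compact indices let the pairs share
 * storage slots:
 *
 *   RR:  r1 -> c[FLD_C_r1] == c[0],  r2 -> c[FLD_C_r2] == c[1]
 *   SI:  b1 -> c[FLD_C_b1] == c[0],  d1 -> c[FLD_C_d1] == c[4]
 *
 * The original ("O") index is still recorded in the presentO bitmap, so
 * have_field() can tell an r1 stored in c[0] apart from a b1 stored there.
 */
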
131 
132 struct DisasFields {
133     uint64_t raw_insn;
134     unsigned op:8;
135     unsigned op2:8;
136     unsigned presentC:16;
137     unsigned int presentO;
138     int c[NUM_C_FIELD];
139 };
140 
141 struct DisasContext {
142     DisasContextBase base;
143     const DisasInsn *insn;
144     TCGOp *insn_start;
145     DisasFields fields;
146     uint64_t ex_value;
147     /*
148      * During translate_one(), pc_tmp is used to determine the instruction
149      * to be executed after base.pc_next - e.g. next sequential instruction
150      * or a branch target.
151      */
152     uint64_t pc_tmp;
153     uint32_t ilen;
154     enum cc_op cc_op;
155     bool exit_to_mainloop;
156 };
157 
158 /* Information carried about a condition to be evaluated.  */
159 typedef struct {
160     TCGCond cond:8;
161     bool is_64;
162     union {
163         struct { TCGv_i64 a, b; } s64;
164         struct { TCGv_i32 a, b; } s32;
165     } u;
166 } DisasCompare;
167 
168 #ifdef DEBUG_INLINE_BRANCHES
169 static uint64_t inline_branch_hit[CC_OP_MAX];
170 static uint64_t inline_branch_miss[CC_OP_MAX];
171 #endif
172 
173 static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
174 {
175     if (s->base.tb->flags & FLAG_MASK_32) {
176         if (s->base.tb->flags & FLAG_MASK_64) {
177             tcg_gen_movi_i64(out, pc);
178             return;
179         }
180         pc |= 0x80000000;
181     }
182     assert(!(s->base.tb->flags & FLAG_MASK_64));
183     tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
184 }
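
/*
 * Worked example (illustrative only): with pc = 0x1000,
 *   64-bit mode (FLAG_MASK_32 | FLAG_MASK_64): out = 0x0000000000001000
 *   31-bit mode (FLAG_MASK_32 only):           low word = 0x80001000
 *   24-bit mode (neither flag):                low word = 0x00001000
 * In the 24/31-bit cases the deposit into bits 0-31 is what preserves
 * the high half of the link register.
 */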

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16 byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
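
/*
 * Worked example (illustrative only): halfword element 3 (es = MO_16,
 * bytes = 2) of a vector register.  The target-order byte offset is
 * enr * bytes = 6.  On a little-endian host the XOR with (8 - bytes)
 * gives 6 ^ 6 = 0, i.e. the halfword sits at host offset 0 within the
 * first doubleword; on a big-endian host the offset stays 6.  Either
 * way the result is added to vec_full_reg_offset(reg).
 */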

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
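
/*
 * Worked example (illustrative only): in 24-bit mode, with regs[b2]
 * holding 0xfffff0 and d2 = 0x20, the raw sum is 0x1000010 and the
 * final AND with 0x00ffffff wraps it to 0x000010.  In 31-bit mode the
 * mask is 0x7fffffff; in 64-bit mode no wrapping occurs.
 */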

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
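
/*
 * Illustrative summary of the helper calling convention above: the
 * gen_helper_calc_cc() slots are always (cc_op, src, dst, vr), with
 * unused slots filled by the dummy constant.  E.g. CC_OP_LTGT_32
 * consumes cc_src/cc_dst (a 2-argument class), while CC_OP_ADD_64 also
 * consumes cc_vr (a 3-argument class).  CC_OP_NZ never reaches the
 * helper; it is computed inline with a setcond.
 */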

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such comparisons, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
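
/*
 * Worked example (illustrative only): the index is the instruction's
 * 4-bit M field, where bit 8 selects CC0 (equal), bit 4 selects CC1
 * (low), bit 2 selects CC2 (high), and bit 1 selects CC3 (ignored
 * here).  A branch mask of 8|4 after a signed comparison thus tests
 * "equal or low", i.e. ltgt_cond[12] == TCG_COND_LE.
 */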

/* Table of mask values to comparison codes, given a logic op as input.
   For such operations, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
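
/*
 * Illustrative expansion (not used directly): get_field(s, r1) becomes
 * get_field1(s, FLD_O_r1, FLD_C_r1), i.e. "assert that the original
 * field is present, then read its compact slot".
 */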

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}
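
/*
 * Illustrative summary (no behavioral change implied): help_branch()
 * emits one of three shapes, from cheapest to most general:
 *   1. brcond plus two goto_tb exits, when both edges can stay chained;
 *   2. brcond plus goto_tb for the fallthru only, leaving psw_addr set
 *      to the taken destination and returning DISAS_PC_UPDATED;
 *   3. a movcond selecting psw_addr between destination and pc_tmp,
 *      when not even the fallthru may use goto_tb (e.g. single-step).
 */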

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
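
/*
 * Worked example (illustrative only): with a static CC from an
 * ADDU-style instruction, CC is 0 or 1 without carry and 2 or 3 with
 * carry, so the carry bit is CC >> 1 (e.g. cc = 3 -> carry = 1).  For
 * CC_OP_SUBU, cc_src holds 0 (no borrow, i.e. carry) or -1 (borrow),
 * hence the +1 to turn it into the 0/1 carry value.
 */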

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}
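
/*
 * Illustrative data flow for the two-step add2 above: the first add2
 * computes in1 + carry-in and captures its carry-out in cc_src; the
 * second adds in2 and accumulates any further carry-out, so cc_src
 * ends up as the total carry (0 or 1), matching the convention that
 * CC_OP_ADDU expects.
 */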

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
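
/*
 * Illustrative layout of the 24-bit-mode link word built above (bits of
 * the low 32 bits of the link register):
 *   31-30: instruction length in halfwords (s->ilen / 2)
 *   29-28: condition code
 *   27-24: program mask (extracted from psw_mask)
 *   23-0:  return address (s->pc_tmp)
 * In 31/64-bit mode, pc_to_link_info() is used instead.
 */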

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
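
/*
 * Usage sketch (illustrative only): for a plain relative branch,
 * disas_jdest() yields is_imm = true and imm = RI, and help_branch()
 * computes the destination as base.pc_next + imm * 2.  For a branch
 * reached via EXECUTE (s->ex_value set), the target must be relative
 * to the EXECUTE'd instruction, so the macro loads ex_target and
 * produces a computed cdest instead.
 */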
1560 
1561 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1562 {
1563     DisasCompare c;
1564     bool is_imm;
1565     int imm;
1566 
1567     pc_to_link_info(o->out, s, s->pc_tmp);
1568 
1569     disas_jdest(s, i2, is_imm, imm, o->in2);
1570     disas_jcc(s, &c, 0xf);
1571     return help_branch(s, &c, is_imm, imm, o->in2);
1572 }
1573 
1574 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1575 {
1576     int m1 = get_field(s, m1);
1577     DisasCompare c;
1578     bool is_imm;
1579     int imm;
1580 
1581     /* BCR with R2 = 0 causes no branching */
1582     if (have_field(s, r2) && get_field(s, r2) == 0) {
1583         if (m1 == 14) {
1584             /* Perform serialization */
1585             /* FIXME: check for fast-BCR-serialization facility */
1586             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1587         }
1588         if (m1 == 15) {
1589             /* Perform serialization */
1590             /* FIXME: perform checkpoint-synchronisation */
1591             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1592         }
1593         return DISAS_NEXT;
1594     }
1595 
1596     disas_jdest(s, i2, is_imm, imm, o->in2);
1597     disas_jcc(s, &c, m1);
1598     return help_branch(s, &c, is_imm, imm, o->in2);
1599 }
1600 
1601 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1602 {
1603     int r1 = get_field(s, r1);
1604     DisasCompare c;
1605     bool is_imm;
1606     TCGv_i64 t;
1607     int imm;
1608 
1609     c.cond = TCG_COND_NE;
1610     c.is_64 = false;
1611 
1612     t = tcg_temp_new_i64();
1613     tcg_gen_subi_i64(t, regs[r1], 1);
1614     store_reg32_i64(r1, t);
1615     c.u.s32.a = tcg_temp_new_i32();
1616     c.u.s32.b = tcg_constant_i32(0);
1617     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1618 
1619     disas_jdest(s, i2, is_imm, imm, o->in2);
1620     return help_branch(s, &c, is_imm, imm, o->in2);
1621 }
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_shri_i64(t, regs[r1], 32);
1635     tcg_gen_subi_i64(t, t, 1);
1636     store_reg32h_i64(r1, t);
1637     c.u.s32.a = tcg_temp_new_i32();
1638     c.u.s32.b = tcg_constant_i32(0);
1639     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1640 
1641     return help_branch(s, &c, 1, imm, o->in2);
1642 }
1643 
1644 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1645 {
1646     int r1 = get_field(s, r1);
1647     DisasCompare c;
1648     bool is_imm;
1649     int imm;
1650 
1651     c.cond = TCG_COND_NE;
1652     c.is_64 = true;
1653 
1654     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1655     c.u.s64.a = regs[r1];
1656     c.u.s64.b = tcg_constant_i64(0);
1657 
1658     disas_jdest(s, i2, is_imm, imm, o->in2);
1659     return help_branch(s, &c, is_imm, imm, o->in2);
1660 }
1661 
1662 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1663 {
1664     int r1 = get_field(s, r1);
1665     int r3 = get_field(s, r3);
1666     DisasCompare c;
1667     bool is_imm;
1668     TCGv_i64 t;
1669     int imm;
1670 
1671     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1672     c.is_64 = false;
1673 
1674     t = tcg_temp_new_i64();
1675     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1676     c.u.s32.a = tcg_temp_new_i32();
1677     c.u.s32.b = tcg_temp_new_i32();
1678     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1679     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1680     store_reg32_i64(r1, t);
1681 
1682     disas_jdest(s, i2, is_imm, imm, o->in2);
1683     return help_branch(s, &c, is_imm, imm, o->in2);
1684 }
1685 
1686 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1687 {
1688     int r1 = get_field(s, r1);
1689     int r3 = get_field(s, r3);
1690     DisasCompare c;
1691     bool is_imm;
1692     int imm;
1693 
1694     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1695     c.is_64 = true;
1696 
1697     if (r1 == (r3 | 1)) {
1698         c.u.s64.b = load_reg(r3 | 1);
1699     } else {
1700         c.u.s64.b = regs[r3 | 1];
1701     }
1702 
1703     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1704     c.u.s64.a = regs[r1];
1705 
1706     disas_jdest(s, i2, is_imm, imm, o->in2);
1707     return help_branch(s, &c, is_imm, imm, o->in2);
1708 }
1709 
1710 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1711 {
1712     int imm, m3 = get_field(s, m3);
1713     bool is_imm;
1714     DisasCompare c;
1715 
1716     c.cond = ltgt_cond[m3];
1717     if (s->insn->data) {
1718         c.cond = tcg_unsigned_cond(c.cond);
1719     }
1720     c.is_64 = true;
1721     c.u.s64.a = o->in1;
1722     c.u.s64.b = o->in2;
1723 
1724     o->out = NULL;
1725     disas_jdest(s, i4, is_imm, imm, o->out);
1726     if (!is_imm && !o->out) {
1727         imm = 0;
1728         o->out = get_address(s, 0, get_field(s, b4),
1729                              get_field(s, d4));
1730     }
1731 
1732     return help_branch(s, &c, is_imm, imm, o->out);
1733 }
1734 
1735 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1736 {
1737     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1738     set_cc_static(s);
1739     return DISAS_NEXT;
1740 }
1741 
1742 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1743 {
1744     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1745     set_cc_static(s);
1746     return DISAS_NEXT;
1747 }
1748 
1749 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1750 {
1751     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1752     set_cc_static(s);
1753     return DISAS_NEXT;
1754 }
1755 
1756 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1757                                    bool m4_with_fpe)
1758 {
1759     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1760     uint8_t m3 = get_field(s, m3);
1761     uint8_t m4 = get_field(s, m4);
1762 
1763     /* m3 field was introduced with FPE */
1764     if (!fpe && m3_with_fpe) {
1765         m3 = 0;
1766     }
1767     /* m4 field was introduced with FPE */
1768     if (!fpe && m4_with_fpe) {
1769         m4 = 0;
1770     }
1771 
1772     /* Check for valid rounding modes. Mode 3 was introduced later. */
1773     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1774         gen_program_exception(s, PGM_SPECIFICATION);
1775         return NULL;
1776     }
1777 
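    /* Pack m3 into bits 0-3 and m4 into bits 4-7 of one constant.  */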
1778     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1779 }
1780 
1781 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1782 {
1783     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1784 
1785     if (!m34) {
1786         return DISAS_NORETURN;
1787     }
1788     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1789     set_cc_static(s);
1790     return DISAS_NEXT;
1791 }
1792 
1793 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1794 {
1795     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1796 
1797     if (!m34) {
1798         return DISAS_NORETURN;
1799     }
1800     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1801     set_cc_static(s);
1802     return DISAS_NEXT;
1803 }
1804 
1805 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1806 {
1807     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1808 
1809     if (!m34) {
1810         return DISAS_NORETURN;
1811     }
1812     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1813     set_cc_static(s);
1814     return DISAS_NEXT;
1815 }
1816 
1817 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1818 {
1819     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1820 
1821     if (!m34) {
1822         return DISAS_NORETURN;
1823     }
1824     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1825     set_cc_static(s);
1826     return DISAS_NEXT;
1827 }
1828 
1829 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1830 {
1831     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1832 
1833     if (!m34) {
1834         return DISAS_NORETURN;
1835     }
1836     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1837     set_cc_static(s);
1838     return DISAS_NEXT;
1839 }
1840 
1841 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1842 {
1843     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1844 
1845     if (!m34) {
1846         return DISAS_NORETURN;
1847     }
1848     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1849     set_cc_static(s);
1850     return DISAS_NEXT;
1851 }
1852 
1853 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1854 {
1855     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1856 
1857     if (!m34) {
1858         return DISAS_NORETURN;
1859     }
1860     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1861     set_cc_static(s);
1862     return DISAS_NEXT;
1863 }
1864 
1865 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1866 {
1867     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1868 
1869     if (!m34) {
1870         return DISAS_NORETURN;
1871     }
1872     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1873     set_cc_static(s);
1874     return DISAS_NEXT;
1875 }
1876 
1877 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1878 {
1879     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1880 
1881     if (!m34) {
1882         return DISAS_NORETURN;
1883     }
1884     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1885     set_cc_static(s);
1886     return DISAS_NEXT;
1887 }
1888 
1889 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1890 {
1891     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1892 
1893     if (!m34) {
1894         return DISAS_NORETURN;
1895     }
1896     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1897     set_cc_static(s);
1898     return DISAS_NEXT;
1899 }
1900 
1901 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1902 {
1903     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1904 
1905     if (!m34) {
1906         return DISAS_NORETURN;
1907     }
1908     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1909     set_cc_static(s);
1910     return DISAS_NEXT;
1911 }
1912 
1913 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1914 {
1915     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1916 
1917     if (!m34) {
1918         return DISAS_NORETURN;
1919     }
1920     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1921     set_cc_static(s);
1922     return DISAS_NEXT;
1923 }
1924 
1925 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1926 {
1927     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1928 
1929     if (!m34) {
1930         return DISAS_NORETURN;
1931     }
1932     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1933     return DISAS_NEXT;
1934 }
1935 
1936 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1937 {
1938     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1939 
1940     if (!m34) {
1941         return DISAS_NORETURN;
1942     }
1943     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1944     return DISAS_NEXT;
1945 }
1946 
1947 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1948 {
1949     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1950 
1951     if (!m34) {
1952         return DISAS_NORETURN;
1953     }
1954     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1955     return DISAS_NEXT;
1956 }
1957 
1958 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1959 {
1960     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1961 
1962     if (!m34) {
1963         return DISAS_NORETURN;
1964     }
1965     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1966     return DISAS_NEXT;
1967 }
1968 
1969 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1970 {
1971     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1972 
1973     if (!m34) {
1974         return DISAS_NORETURN;
1975     }
1976     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1977     return DISAS_NEXT;
1978 }
1979 
1980 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1981 {
1982     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1983 
1984     if (!m34) {
1985         return DISAS_NORETURN;
1986     }
1987     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1988     return DISAS_NEXT;
1989 }
1990 
1991 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1992 {
1993     int r2 = get_field(s, r2);
1994     TCGv_i128 pair = tcg_temp_new_i128();
1995     TCGv_i64 len = tcg_temp_new_i64();
1996 
1997     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1998     set_cc_static(s);
1999     tcg_gen_extr_i128_i64(o->out, len, pair);
2000 
2001     tcg_gen_add_i64(regs[r2], regs[r2], len);
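    /* Advance the second-operand address and shrink its length by the
       number of bytes processed.  */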
2002     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2003 
2004     return DISAS_NEXT;
2005 }
2006 
2007 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2008 {
2009     int l = get_field(s, l1);
2010     TCGv_i32 vl;
2011     MemOp mop;
2012 
2013     switch (l + 1) {
2014     case 1:
2015     case 2:
2016     case 4:
2017     case 8:
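        /* ctz32(l + 1) yields log2 of the length: MO_8 up to MO_64.  */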
2018         mop = ctz32(l + 1) | MO_TE;
2019         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2020         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2021         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2022         return DISAS_NEXT;
2023     default:
2024         vl = tcg_constant_i32(l);
2025         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2026         set_cc_static(s);
2027         return DISAS_NEXT;
2028     }
2029 }
2030 
2031 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2032 {
2033     int r1 = get_field(s, r1);
2034     int r2 = get_field(s, r2);
2035     TCGv_i32 t1, t2;
2036 
2037     /* r1 and r2 must be even.  */
2038     if (r1 & 1 || r2 & 1) {
2039         gen_program_exception(s, PGM_SPECIFICATION);
2040         return DISAS_NORETURN;
2041     }
2042 
2043     t1 = tcg_constant_i32(r1);
2044     t2 = tcg_constant_i32(r2);
2045     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2046     set_cc_static(s);
2047     return DISAS_NEXT;
2048 }
2049 
2050 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2051 {
2052     int r1 = get_field(s, r1);
2053     int r3 = get_field(s, r3);
2054     TCGv_i32 t1, t3;
2055 
2056     /* r1 and r3 must be even.  */
2057     if (r1 & 1 || r3 & 1) {
2058         gen_program_exception(s, PGM_SPECIFICATION);
2059         return DISAS_NORETURN;
2060     }
2061 
2062     t1 = tcg_constant_i32(r1);
2063     t3 = tcg_constant_i32(r3);
2064     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2065     set_cc_static(s);
2066     return DISAS_NEXT;
2067 }
2068 
2069 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2070 {
2071     int r1 = get_field(s, r1);
2072     int r3 = get_field(s, r3);
2073     TCGv_i32 t1, t3;
2074 
2075     /* r1 and r3 must be even.  */
2076     if (r1 & 1 || r3 & 1) {
2077         gen_program_exception(s, PGM_SPECIFICATION);
2078         return DISAS_NORETURN;
2079     }
2080 
2081     t1 = tcg_constant_i32(r1);
2082     t3 = tcg_constant_i32(r3);
2083     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2084     set_cc_static(s);
2085     return DISAS_NEXT;
2086 }
2087 
2088 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2089 {
2090     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2091     TCGv_i32 t1 = tcg_temp_new_i32();
2092 
2093     tcg_gen_extrl_i64_i32(t1, o->in1);
2094     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2095     set_cc_static(s);
2096     return DISAS_NEXT;
2097 }
2098 
2099 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2100 {
2101     TCGv_i128 pair = tcg_temp_new_i128();
2102 
2103     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
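    /* The helper returns the two (possibly updated) operand addresses
       packed in an i128.  */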
2104     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2105 
2106     set_cc_static(s);
2107     return DISAS_NEXT;
2108 }
2109 
2110 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2111 {
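    /* COPY SIGN: combine the sign bit of in1 with the magnitude of in2.  */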
2112     TCGv_i64 t = tcg_temp_new_i64();
2113     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2114     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2115     tcg_gen_or_i64(o->out, o->out, t);
2116     return DISAS_NEXT;
2117 }
2118 
2119 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2120 {
2121     int d2 = get_field(s, d2);
2122     int b2 = get_field(s, b2);
2123     TCGv_i64 addr, cc;
2124 
2125     /* Note that in1 = R3 (new value) and
2126        in2 = (zero-extended) R1 (expected value).  */
2127 
2128     addr = get_address(s, 0, b2, d2);
2129     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2130                                get_mem_index(s), s->insn->data | MO_ALIGN);
2131 
2132     /* Are the memory and expected values (un)equal?  Note that this setcond
2133        produces the output CC value, thus the NE sense of the test.  */
2134     cc = tcg_temp_new_i64();
2135     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2136     tcg_gen_extrl_i64_i32(cc_op, cc);
2137     set_cc_static(s);
2138 
2139     return DISAS_NEXT;
2140 }
2141 
2142 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2143 {
2144     int r1 = get_field(s, r1);
2145 
2146     o->out_128 = tcg_temp_new_i128();
2147     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2148 
2149     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2150     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2151                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2152 
2153     /*
2154      * Extract result into cc_dst:cc_src, compare vs the expected value
2155      * in the as yet unmodified input registers, then update CC_OP.
2156      */
2157     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2158     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2159     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2160     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2161     set_cc_nz_u64(s, cc_dst);
2162 
2163     return DISAS_NEXT;
2164 }
2165 
2166 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2167 {
2168     int r3 = get_field(s, r3);
2169     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2170 
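    /* Pick the helper variant matching whether other CPUs may run
       concurrently with this one.  */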
2171     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2172         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2173     } else {
2174         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2175     }
2176 
2177     set_cc_static(s);
2178     return DISAS_NEXT;
2179 }
2180 
2181 #ifndef CONFIG_USER_ONLY
2182 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2183 {
2184     MemOp mop = s->insn->data;
2185     TCGv_i64 addr, old, cc;
2186     TCGLabel *lab = gen_new_label();
2187 
2188     /* Note that in1 = R1 (zero-extended expected value),
2189        out = R1 (original reg), out2 = R1+1 (new value).  */
2190 
2191     addr = tcg_temp_new_i64();
2192     old = tcg_temp_new_i64();
2193     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2194     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2195                                get_mem_index(s), mop | MO_ALIGN);
2196 
2197     /* Are the memory and expected values (un)equal?  */
2198     cc = tcg_temp_new_i64();
2199     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2200     tcg_gen_extrl_i64_i32(cc_op, cc);
2201 
2202     /* Write back the output now, before the following branch, so
2203        that we don't need local temps.  */
2204     if ((mop & MO_SIZE) == MO_32) {
2205         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2206     } else {
2207         tcg_gen_mov_i64(o->out, old);
2208     }
2209 
2210     /* If the comparison was equal, and the LSB of R2 was set,
2211        then we need to flush the TLB (for all cpus).  */
2212     tcg_gen_xori_i64(cc, cc, 1);
2213     tcg_gen_and_i64(cc, cc, o->in2);
2214     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2215 
2216     gen_helper_purge(cpu_env);
2217     gen_set_label(lab);
2218 
2219     return DISAS_NEXT;
2220 }
2221 #endif
2222 
2223 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2224 {
2225     TCGv_i64 t1 = tcg_temp_new_i64();
2226     TCGv_i32 t2 = tcg_temp_new_i32();
2227     tcg_gen_extrl_i64_i32(t2, o->in1);
2228     gen_helper_cvd(t1, t2);
2229     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2230     return DISAS_NEXT;
2231 }
2232 
2233 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2234 {
2235     int m3 = get_field(s, m3);
2236     TCGLabel *lab = gen_new_label();
2237     TCGCond c;
2238 
2239     c = tcg_invert_cond(ltgt_cond[m3]);
2240     if (s->insn->data) {
2241         c = tcg_unsigned_cond(c);
2242     }
2243     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2244 
2245     /* Trap.  */
2246     gen_trap(s);
2247 
2248     gen_set_label(lab);
2249     return DISAS_NEXT;
2250 }
2251 
2252 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2253 {
2254     int m3 = get_field(s, m3);
2255     int r1 = get_field(s, r1);
2256     int r2 = get_field(s, r2);
2257     TCGv_i32 tr1, tr2, chk;
2258 
2259     /* R1 and R2 must both be even.  */
2260     if ((r1 | r2) & 1) {
2261         gen_program_exception(s, PGM_SPECIFICATION);
2262         return DISAS_NORETURN;
2263     }
2264     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2265         m3 = 0;
2266     }
2267 
2268     tr1 = tcg_constant_i32(r1);
2269     tr2 = tcg_constant_i32(r2);
2270     chk = tcg_constant_i32(m3);
2271 
2272     switch (s->insn->data) {
2273     case 12:
2274         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2275         break;
2276     case 14:
2277         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2278         break;
2279     case 21:
2280         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2281         break;
2282     case 24:
2283         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2284         break;
2285     case 41:
2286         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2287         break;
2288     case 42:
2289         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2290         break;
2291     default:
2292         g_assert_not_reached();
2293     }
2294 
2295     set_cc_static(s);
2296     return DISAS_NEXT;
2297 }
2298 
2299 #ifndef CONFIG_USER_ONLY
2300 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2301 {
2302     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2303     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2304     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2305 
2306     gen_helper_diag(cpu_env, r1, r3, func_code);
2307     return DISAS_NEXT;
2308 }
2309 #endif
2310 
2311 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2312 {
2313     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2314     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2315     return DISAS_NEXT;
2316 }
2317 
2318 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2319 {
2320     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2321     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2322     return DISAS_NEXT;
2323 }
2324 
2325 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2326 {
2327     TCGv_i128 t = tcg_temp_new_i128();
2328 
2329     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2330     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2335 {
2336     TCGv_i128 t = tcg_temp_new_i128();
2337 
2338     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2339     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2340     return DISAS_NEXT;
2341 }
2342 
2343 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2344 {
2345     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2346     return DISAS_NEXT;
2347 }
2348 
2349 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2350 {
2351     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2352     return DISAS_NEXT;
2353 }
2354 
2355 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2356 {
2357     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2358     return DISAS_NEXT;
2359 }
2360 
2361 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2362 {
2363     int r2 = get_field(s, r2);
2364     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2365     return DISAS_NEXT;
2366 }
2367 
2368 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2369 {
2370     /* No cache information provided.  */
2371     tcg_gen_movi_i64(o->out, -1);
2372     return DISAS_NEXT;
2373 }
2374 
2375 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2376 {
2377     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2378     return DISAS_NEXT;
2379 }
2380 
2381 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2382 {
2383     int r1 = get_field(s, r1);
2384     int r2 = get_field(s, r2);
2385     TCGv_i64 t = tcg_temp_new_i64();
2386 
2387     /* Note the "subsequently" in the PoO, which implies a defined result
2388        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2389     tcg_gen_shri_i64(t, psw_mask, 32);
2390     store_reg32_i64(r1, t);
2391     if (r2 != 0) {
2392         store_reg32_i64(r2, psw_mask);
2393     }
2394     return DISAS_NEXT;
2395 }
2396 
2397 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2398 {
2399     int r1 = get_field(s, r1);
2400     TCGv_i32 ilen;
2401     TCGv_i64 v1;
2402 
2403     /* Nested EXECUTE is not allowed.  */
2404     if (unlikely(s->ex_value)) {
2405         gen_program_exception(s, PGM_EXECUTE);
2406         return DISAS_NORETURN;
2407     }
2408 
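    /* The helper may fault, so bring the PSW address and cc up to date.  */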
2409     update_psw_addr(s);
2410     update_cc_op(s);
2411 
2412     if (r1 == 0) {
2413         v1 = tcg_constant_i64(0);
2414     } else {
2415         v1 = regs[r1];
2416     }
2417 
2418     ilen = tcg_constant_i32(s->ilen);
2419     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2420 
2421     return DISAS_PC_CC_UPDATED;
2422 }
2423 
2424 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2425 {
2426     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2427 
2428     if (!m34) {
2429         return DISAS_NORETURN;
2430     }
2431     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2432     return DISAS_NEXT;
2433 }
2434 
2435 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2436 {
2437     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2438 
2439     if (!m34) {
2440         return DISAS_NORETURN;
2441     }
2442     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2443     return DISAS_NEXT;
2444 }
2445 
2446 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2447 {
2448     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2449 
2450     if (!m34) {
2451         return DISAS_NORETURN;
2452     }
2453     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2454     return DISAS_NEXT;
2455 }
2456 
2457 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2458 {
2459     /* We'll use the original input for cc computation, since we get to
2460        compare that against 0, which ought to be better than comparing
2461        the real output against 64.  It also lets cc_dst be a convenient
2462        temporary during our computation.  */
2463     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2464 
2465     /* R1 = IN ? CLZ(IN) : 64.  */
2466     tcg_gen_clzi_i64(o->out, o->in2, 64);
2467 
2468     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2469        value by 64, which is undefined.  But since the shift is 64 iff the
2470        input is zero, we still get the correct result after and'ing.  */
2471     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2472     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2473     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2474     return DISAS_NEXT;
2475 }
2476 
2477 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2478 {
2479     int m3 = get_field(s, m3);
2480     int pos, len, base = s->insn->data;
2481     TCGv_i64 tmp = tcg_temp_new_i64();
2482     uint64_t ccm;
2483 
2484     switch (m3) {
2485     case 0xf:
2486         /* Effectively a 32-bit load.  */
2487         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2488         len = 32;
2489         goto one_insert;
2490 
2491     case 0xc:
2492     case 0x6:
2493     case 0x3:
2494         /* Effectively a 16-bit load.  */
2495         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2496         len = 16;
2497         goto one_insert;
2498 
2499     case 0x8:
2500     case 0x4:
2501     case 0x2:
2502     case 0x1:
2503         /* Effectively an 8-bit load.  */
2504         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2505         len = 8;
2506         goto one_insert;
2507 
2508     one_insert:
2509         pos = base + ctz32(m3) * 8;
2510         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2511         ccm = ((1ull << len) - 1) << pos;
2512         break;
2513 
2514     default:
2515         /* This is going to be a sequence of loads and inserts.  */
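        /* e.g. m3 == 0xa loads two bytes and inserts them at bit
           positions base + 24 and base + 8.  */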
2516         pos = base + 32 - 8;
2517         ccm = 0;
2518         while (m3) {
2519             if (m3 & 0x8) {
2520                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2521                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2522                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2523                 ccm |= 0xffull << pos;
2524             }
2525             m3 = (m3 << 1) & 0xf;
2526             pos -= 8;
2527         }
2528         break;
2529     }
2530 
2531     tcg_gen_movi_i64(tmp, ccm);
2532     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2533     return DISAS_NEXT;
2534 }
2535 
2536 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2537 {
2538     int shift = s->insn->data & 0xff;
2539     int size = s->insn->data >> 8;
2540     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2541     return DISAS_NEXT;
2542 }
2543 
2544 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2545 {
2546     TCGv_i64 t1, t2;
2547 
2548     gen_op_calc_cc(s);
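    /* Build the byte: program mask in bits 0-3, cc in bits 4-5, zeros
       above; then insert it at bits 24-31 of R1.  */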
2549     t1 = tcg_temp_new_i64();
2550     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2551     t2 = tcg_temp_new_i64();
2552     tcg_gen_extu_i32_i64(t2, cc_op);
2553     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2554     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2555     return DISAS_NEXT;
2556 }
2557 
2558 #ifndef CONFIG_USER_ONLY
2559 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2560 {
2561     TCGv_i32 m4;
2562 
2563     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2564         m4 = tcg_constant_i32(get_field(s, m4));
2565     } else {
2566         m4 = tcg_constant_i32(0);
2567     }
2568     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2569     return DISAS_NEXT;
2570 }
2571 
2572 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2573 {
2574     TCGv_i32 m4;
2575 
2576     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2577         m4 = tcg_constant_i32(get_field(s, m4));
2578     } else {
2579         m4 = tcg_constant_i32(0);
2580     }
2581     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2582     return DISAS_NEXT;
2583 }
2584 
2585 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2586 {
2587     gen_helper_iske(o->out, cpu_env, o->in2);
2588     return DISAS_NEXT;
2589 }
2590 #endif
2591 
2592 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2593 {
2594     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2595     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2596     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2597     TCGv_i32 t_r1, t_r2, t_r3, type;
2598 
2599     switch (s->insn->data) {
2600     case S390_FEAT_TYPE_KMA:
2601         if (r3 == r1 || r3 == r2) {
2602             gen_program_exception(s, PGM_SPECIFICATION);
2603             return DISAS_NORETURN;
2604         }
2605         /* FALL THROUGH */
2606     case S390_FEAT_TYPE_KMCTR:
2607         if (r3 & 1 || !r3) {
2608             gen_program_exception(s, PGM_SPECIFICATION);
2609             return DISAS_NORETURN;
2610         }
2611         /* FALL THROUGH */
2612     case S390_FEAT_TYPE_PPNO:
2613     case S390_FEAT_TYPE_KMF:
2614     case S390_FEAT_TYPE_KMC:
2615     case S390_FEAT_TYPE_KMO:
2616     case S390_FEAT_TYPE_KM:
2617         if (r1 & 1 || !r1) {
2618             gen_program_exception(s, PGM_SPECIFICATION);
2619             return DISAS_NORETURN;
2620         }
2621         /* FALL THROUGH */
2622     case S390_FEAT_TYPE_KMAC:
2623     case S390_FEAT_TYPE_KIMD:
2624     case S390_FEAT_TYPE_KLMD:
2625         if (r2 & 1 || !r2) {
2626             gen_program_exception(s, PGM_SPECIFICATION);
2627             return DISAS_NORETURN;
2628         }
2629         /* FALL THROUGH */
2630     case S390_FEAT_TYPE_PCKMO:
2631     case S390_FEAT_TYPE_PCC:
2632         break;
2633     default:
2634         g_assert_not_reached();
2635     }
2636 
2637     t_r1 = tcg_constant_i32(r1);
2638     t_r2 = tcg_constant_i32(r2);
2639     t_r3 = tcg_constant_i32(r3);
2640     type = tcg_constant_i32(s->insn->data);
2641     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2642     set_cc_static(s);
2643     return DISAS_NEXT;
2644 }
2645 
2646 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2647 {
2648     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2649     set_cc_static(s);
2650     return DISAS_NEXT;
2651 }
2652 
2653 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2654 {
2655     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2656     set_cc_static(s);
2657     return DISAS_NEXT;
2658 }
2659 
2660 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2661 {
2662     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2663     set_cc_static(s);
2664     return DISAS_NEXT;
2665 }
2666 
2667 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2668 {
2669     /* The real output is indeed the original value in memory,
2670        i.e. what the atomic fetch-add returns.  */
2671     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2672                                  s->insn->data | MO_ALIGN);
2673     /* However, we need to recompute the addition for setting CC.  */
2674     tcg_gen_add_i64(o->out, o->in1, o->in2);
2675     return DISAS_NEXT;
2676 }
2677 
2678 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2679 {
2680     /* The real output is indeed the original value in memory,
2681        i.e. what the atomic fetch-and returns.  */
2682     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2683                                  s->insn->data | MO_ALIGN);
2684     /* However, we need to recompute the operation for setting CC.  */
2685     tcg_gen_and_i64(o->out, o->in1, o->in2);
2686     return DISAS_NEXT;
2687 }
2688 
2689 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2690 {
2691     /* The real output is indeed the original value in memory,
2692        i.e. what the atomic fetch-or returns.  */
2693     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2694                                 s->insn->data | MO_ALIGN);
2695     /* However, we need to recompute the operation for setting CC.  */
2696     tcg_gen_or_i64(o->out, o->in1, o->in2);
2697     return DISAS_NEXT;
2698 }
2699 
2700 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2701 {
2702     /* The real output is indeed the original value in memory,
2703        i.e. what the atomic fetch-xor returns.  */
2704     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2705                                  s->insn->data | MO_ALIGN);
2706     /* However, we need to recompute the operation for setting CC.  */
2707     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2708     return DISAS_NEXT;
2709 }
2710 
2711 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2712 {
2713     gen_helper_ldeb(o->out, cpu_env, o->in2);
2714     return DISAS_NEXT;
2715 }
2716 
2717 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2718 {
2719     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2720 
2721     if (!m34) {
2722         return DISAS_NORETURN;
2723     }
2724     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2725     return DISAS_NEXT;
2726 }
2727 
2728 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2729 {
2730     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2731 
2732     if (!m34) {
2733         return DISAS_NORETURN;
2734     }
2735     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2736     return DISAS_NEXT;
2737 }
2738 
2739 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2740 {
2741     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2742 
2743     if (!m34) {
2744         return DISAS_NORETURN;
2745     }
2746     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2747     return DISAS_NEXT;
2748 }
2749 
2750 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2751 {
2752     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2753     return DISAS_NEXT;
2754 }
2755 
2756 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2757 {
2758     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2759     return DISAS_NEXT;
2760 }
2761 
2762 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2763 {
2764     tcg_gen_shli_i64(o->out, o->in2, 32);
2765     return DISAS_NEXT;
2766 }
2767 
2768 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2769 {
2770     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2771     return DISAS_NEXT;
2772 }
2773 
2774 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2775 {
2776     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2777     return DISAS_NEXT;
2778 }
2779 
2780 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2781 {
2782     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2783     return DISAS_NEXT;
2784 }
2785 
2786 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2787 {
2788     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2789     return DISAS_NEXT;
2790 }
2791 
2792 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2793 {
2794     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2795     return DISAS_NEXT;
2796 }
2797 
2798 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2799 {
2800     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2801                        MO_TESL | s->insn->data);
2802     return DISAS_NEXT;
2803 }
2804 
2805 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2806 {
2807     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2808                        MO_TEUL | s->insn->data);
2809     return DISAS_NEXT;
2810 }
2811 
2812 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2813 {
2814     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2815                         MO_TEUQ | s->insn->data);
2816     return DISAS_NEXT;
2817 }
2818 
2819 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2820 {
2821     TCGLabel *lab = gen_new_label();
2822     store_reg32_i64(get_field(s, r1), o->in2);
2823     /* The value is stored even in case of trap. */
2824     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2825     gen_trap(s);
2826     gen_set_label(lab);
2827     return DISAS_NEXT;
2828 }
2829 
2830 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2831 {
2832     TCGLabel *lab = gen_new_label();
2833     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2834     /* The value is stored even in case of trap. */
2835     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2836     gen_trap(s);
2837     gen_set_label(lab);
2838     return DISAS_NEXT;
2839 }
2840 
2841 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2842 {
2843     TCGLabel *lab = gen_new_label();
2844     store_reg32h_i64(get_field(s, r1), o->in2);
2845     /* The value is stored even in case of trap. */
2846     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2847     gen_trap(s);
2848     gen_set_label(lab);
2849     return DISAS_NEXT;
2850 }
2851 
2852 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2853 {
2854     TCGLabel *lab = gen_new_label();
2855 
2856     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2857     /* The value is stored even in case of trap. */
2858     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2859     gen_trap(s);
2860     gen_set_label(lab);
2861     return DISAS_NEXT;
2862 }
2863 
2864 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2865 {
2866     TCGLabel *lab = gen_new_label();
2867     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2868     /* The value is stored even in case of trap. */
2869     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2870     gen_trap(s);
2871     gen_set_label(lab);
2872     return DISAS_NEXT;
2873 }
2874 
2875 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2876 {
2877     DisasCompare c;
2878 
2879     if (have_field(s, m3)) {
2880         /* LOAD * ON CONDITION */
2881         disas_jcc(s, &c, get_field(s, m3));
2882     } else {
2883         /* SELECT */
2884         disas_jcc(s, &c, get_field(s, m4));
2885     }
2886 
2887     if (c.is_64) {
2888         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2889                             o->in2, o->in1);
2890     } else {
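        /* There is no 32-bit movcond producing an i64, so compute the
           condition as a flag, widen it, and select on flag != 0.  */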
2891         TCGv_i32 t32 = tcg_temp_new_i32();
2892         TCGv_i64 t, z;
2893 
2894         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2895 
2896         t = tcg_temp_new_i64();
2897         tcg_gen_extu_i32_i64(t, t32);
2898 
2899         z = tcg_constant_i64(0);
2900         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2901     }
2902 
2903     return DISAS_NEXT;
2904 }
2905 
2906 #ifndef CONFIG_USER_ONLY
2907 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2908 {
2909     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2910     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2911 
2912     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2913     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2914     s->exit_to_mainloop = true;
2915     return DISAS_TOO_MANY;
2916 }
2917 
2918 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2919 {
2920     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2921     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2922 
2923     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2924     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2925     s->exit_to_mainloop = true;
2926     return DISAS_TOO_MANY;
2927 }
2928 
2929 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2930 {
2931     gen_helper_lra(o->out, cpu_env, o->in2);
2932     set_cc_static(s);
2933     return DISAS_NEXT;
2934 }
2935 
2936 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2937 {
2938     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2939     return DISAS_NEXT;
2940 }
2941 
2942 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2943 {
2944     TCGv_i64 mask, addr;
2945 
2946     per_breaking_event(s);
2947 
2948     /*
2949      * Convert the short PSW into the normal PSW, similar to what
2950      * s390_cpu_load_normal() does.
2951      */
2952     mask = tcg_temp_new_i64();
2953     addr = tcg_temp_new_i64();
2954     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2955     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2956     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2957     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2958     gen_helper_load_psw(cpu_env, mask, addr);
2959     return DISAS_NORETURN;
2960 }
2961 
2962 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2963 {
2964     TCGv_i64 t1, t2;
2965 
2966     per_breaking_event(s);
2967 
2968     t1 = tcg_temp_new_i64();
2969     t2 = tcg_temp_new_i64();
2970     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2971                         MO_TEUQ | MO_ALIGN_8);
2972     tcg_gen_addi_i64(o->in2, o->in2, 8);
2973     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2974     gen_helper_load_psw(cpu_env, t1, t2);
2975     return DISAS_NORETURN;
2976 }
2977 #endif
2978 
2979 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2980 {
2981     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2982     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2983 
2984     gen_helper_lam(cpu_env, r1, o->in2, r3);
2985     return DISAS_NEXT;
2986 }
2987 
2988 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2989 {
2990     int r1 = get_field(s, r1);
2991     int r3 = get_field(s, r3);
2992     TCGv_i64 t1, t2;
2993 
2994     /* Only one register to read. */
2995     t1 = tcg_temp_new_i64();
2996     if (unlikely(r1 == r3)) {
2997         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
2998         store_reg32_i64(r1, t1);
2999         return DISAS_NEXT;
3000     }
3001 
3002     /* First load the values of the first and last registers to trigger
3003        possible page faults. */
3004     t2 = tcg_temp_new_i64();
3005     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3006     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3007     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3008     store_reg32_i64(r1, t1);
3009     store_reg32_i64(r3, t2);
3010 
3011     /* Only two registers to read. */
3012     if (((r1 + 1) & 15) == r3) {
3013         return DISAS_NEXT;
3014     }
3015 
3016     /* Then load the remaining registers; page faults can no longer occur. */
3017     r3 = (r3 - 1) & 15;
3018     tcg_gen_movi_i64(t2, 4);
3019     while (r1 != r3) {
3020         r1 = (r1 + 1) & 15;
3021         tcg_gen_add_i64(o->in2, o->in2, t2);
3022         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3023         store_reg32_i64(r1, t1);
3024     }
3025     return DISAS_NEXT;
3026 }
3027 
3028 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3029 {
3030     int r1 = get_field(s, r1);
3031     int r3 = get_field(s, r3);
3032     TCGv_i64 t1, t2;
3033 
3034     /* Only one register to read. */
3035     t1 = tcg_temp_new_i64();
3036     if (unlikely(r1 == r3)) {
3037         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3038         store_reg32h_i64(r1, t1);
3039         return DISAS_NEXT;
3040     }
3041 
3042     /* First load the values of the first and last registers to trigger
3043        possible page faults. */
3044     t2 = tcg_temp_new_i64();
3045     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3046     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3047     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3048     store_reg32h_i64(r1, t1);
3049     store_reg32h_i64(r3, t2);
3050 
3051     /* Only two registers to read. */
3052     if (((r1 + 1) & 15) == r3) {
3053         return DISAS_NEXT;
3054     }
3055 
3056     /* Then load the remaining registers; page faults can no longer occur. */
3057     r3 = (r3 - 1) & 15;
3058     tcg_gen_movi_i64(t2, 4);
3059     while (r1 != r3) {
3060         r1 = (r1 + 1) & 15;
3061         tcg_gen_add_i64(o->in2, o->in2, t2);
3062         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3063         store_reg32h_i64(r1, t1);
3064     }
3065     return DISAS_NEXT;
3066 }
3067 
3068 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3069 {
3070     int r1 = get_field(s, r1);
3071     int r3 = get_field(s, r3);
3072     TCGv_i64 t1, t2;
3073 
3074     /* Only one register to read. */
3075     if (unlikely(r1 == r3)) {
3076         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3077         return DISAS_NEXT;
3078     }
3079 
3080     /* First load the values of the first and last registers to trigger
3081        possible page faults. */
3082     t1 = tcg_temp_new_i64();
3083     t2 = tcg_temp_new_i64();
3084     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3085     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3086     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3087     tcg_gen_mov_i64(regs[r1], t1);
3088 
3089     /* Only two registers to read. */
3090     if (((r1 + 1) & 15) == r3) {
3091         return DISAS_NEXT;
3092     }
3093 
3094     /* Then load the remaining registers; page faults can no longer occur. */
3095     r3 = (r3 - 1) & 15;
3096     tcg_gen_movi_i64(t1, 8);
3097     while (r1 != r3) {
3098         r1 = (r1 + 1) & 15;
3099         tcg_gen_add_i64(o->in2, o->in2, t1);
3100         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3101     }
3102     return DISAS_NEXT;
3103 }
3104 
3105 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3106 {
3107     TCGv_i64 a1, a2;
3108     MemOp mop = s->insn->data;
3109 
3110     /* In a parallel context, stop the world and single step.  */
3111     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3112         update_psw_addr(s);
3113         update_cc_op(s);
3114         gen_exception(EXCP_ATOMIC);
3115         return DISAS_NORETURN;
3116     }
3117 
3118     /* In a serial context, perform the two loads ... */
3119     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3120     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3121     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3122     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3123 
3124     /* ... and indicate that we performed them while interlocked.  */
3125     gen_op_movi_cc(s, 0);
3126     return DISAS_NEXT;
3127 }
3128 
3129 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3130 {
3131     o->out_128 = tcg_temp_new_i128();
3132     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3133                          MO_TE | MO_128 | MO_ALIGN);
3134     return DISAS_NEXT;
3135 }
3136 
3137 #ifndef CONFIG_USER_ONLY
3138 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3139 {
3140     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3141     return DISAS_NEXT;
3142 }
3143 #endif
3144 
3145 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3146 {
3147     tcg_gen_andi_i64(o->out, o->in2, -256);
3148     return DISAS_NEXT;
3149 }
3150 
3151 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3152 {
3153     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3154 
3155     if (get_field(s, m3) > 6) {
3156         gen_program_exception(s, PGM_SPECIFICATION);
3157         return DISAS_NORETURN;
3158     }
3159 
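    /* -(addr | -block_size) is the number of bytes from addr up to the
       next block boundary, which is then capped at 16.  */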
3160     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3161     tcg_gen_neg_i64(o->addr1, o->addr1);
3162     tcg_gen_movi_i64(o->out, 16);
3163     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3164     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3165     return DISAS_NEXT;
3166 }
3167 
3168 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3169 {
3170     const uint16_t monitor_class = get_field(s, i2);
3171 
3172     if (monitor_class & 0xff00) {
3173         gen_program_exception(s, PGM_SPECIFICATION);
3174         return DISAS_NORETURN;
3175     }
3176 
3177 #if !defined(CONFIG_USER_ONLY)
3178     gen_helper_monitor_call(cpu_env, o->addr1,
3179                             tcg_constant_i32(monitor_class));
3180 #endif
3181     /* Defaults to a NOP. */
3182     return DISAS_NEXT;
3183 }
3184 
3185 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3186 {
3187     o->out = o->in2;
3188     o->in2 = NULL;
3189     return DISAS_NEXT;
3190 }
3191 
3192 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3193 {
3194     int b2 = get_field(s, b2);
3195     TCGv ar1 = tcg_temp_new_i64();
3196 
3197     o->out = o->in2;
3198     o->in2 = NULL;
3199 
3200     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3201     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3202         tcg_gen_movi_i64(ar1, 0);
3203         break;
3204     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3205         tcg_gen_movi_i64(ar1, 1);
3206         break;
3207     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3208         if (b2) {
3209             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3210         } else {
3211             tcg_gen_movi_i64(ar1, 0);
3212         }
3213         break;
3214     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3215         tcg_gen_movi_i64(ar1, 2);
3216         break;
3217     }
3218 
3219     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3220     return DISAS_NEXT;
3221 }
3222 
3223 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3224 {
3225     o->out = o->in1;
3226     o->out2 = o->in2;
3227     o->in1 = NULL;
3228     o->in2 = NULL;
3229     return DISAS_NEXT;
3230 }
3231 
3232 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3233 {
3234     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3235 
3236     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3237     return DISAS_NEXT;
3238 }
3239 
3240 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3241 {
3242     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3243     return DISAS_NEXT;
3244 }
3245 
3246 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3247 {
3248     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3249 
3250     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3251     return DISAS_NEXT;
3252 }
3253 
3254 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3255 {
3256     int r1 = get_field(s, r1);
3257     int r2 = get_field(s, r2);
3258     TCGv_i32 t1, t2;
3259 
3260     /* r1 and r2 must be even.  */
3261     if (r1 & 1 || r2 & 1) {
3262         gen_program_exception(s, PGM_SPECIFICATION);
3263         return DISAS_NORETURN;
3264     }
3265 
3266     t1 = tcg_constant_i32(r1);
3267     t2 = tcg_constant_i32(r2);
3268     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3269     set_cc_static(s);
3270     return DISAS_NEXT;
3271 }
3272 
3273 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3274 {
3275     int r1 = get_field(s, r1);
3276     int r3 = get_field(s, r3);
3277     TCGv_i32 t1, t3;
3278 
3279     /* r1 and r3 must be even.  */
3280     if (r1 & 1 || r3 & 1) {
3281         gen_program_exception(s, PGM_SPECIFICATION);
3282         return DISAS_NORETURN;
3283     }
3284 
3285     t1 = tcg_constant_i32(r1);
3286     t3 = tcg_constant_i32(r3);
3287     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3288     set_cc_static(s);
3289     return DISAS_NEXT;
3290 }
3291 
3292 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3293 {
3294     int r1 = get_field(s, r1);
3295     int r3 = get_field(s, r3);
3296     TCGv_i32 t1, t3;
3297 
3298     /* r1 and r3 must be even.  */
3299     if (r1 & 1 || r3 & 1) {
3300         gen_program_exception(s, PGM_SPECIFICATION);
3301         return DISAS_NORETURN;
3302     }
3303 
3304     t1 = tcg_constant_i32(r1);
3305     t3 = tcg_constant_i32(r3);
3306     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3307     set_cc_static(s);
3308     return DISAS_NEXT;
3309 }
3310 
3311 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3312 {
3313     int r3 = get_field(s, r3);
3314     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3315     set_cc_static(s);
3316     return DISAS_NEXT;
3317 }
3318 
3319 #ifndef CONFIG_USER_ONLY
3320 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3321 {
3322     int r1 = get_field(s, l1);
3323     int r3 = get_field(s, r3);
3324     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3325     set_cc_static(s);
3326     return DISAS_NEXT;
3327 }
3328 
3329 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3330 {
3331     int r1 = get_field(s, l1);
3332     int r3 = get_field(s, r3);
3333     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3334     set_cc_static(s);
3335     return DISAS_NEXT;
3336 }
3337 #endif
3338 
3339 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3340 {
3341     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3342 
3343     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3344     return DISAS_NEXT;
3345 }
3346 
3347 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3348 {
3349     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3350 
3351     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3352     return DISAS_NEXT;
3353 }
3354 
3355 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3356 {
3357     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3358     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3359 
3360     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3361     set_cc_static(s);
3362     return DISAS_NEXT;
3363 }
3364 
3365 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3366 {
3367     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3368     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3369 
3370     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3371     set_cc_static(s);
3372     return DISAS_NEXT;
3373 }
3374 
3375 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3376 {
3377     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3378 
3379     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3380     return DISAS_NEXT;
3381 }
3382 
3383 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3384 {
3385     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3386     return DISAS_NEXT;
3387 }
3388 
3389 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3390 {
3391     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3392     return DISAS_NEXT;
3393 }
3394 
3395 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3396 {
3397     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3398     return DISAS_NEXT;
3399 }
3400 
3401 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3402 {
3403     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3404     return DISAS_NEXT;
3405 }
3406 
3407 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3408 {
3409     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3410     return DISAS_NEXT;
3411 }
3412 
3413 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3414 {
3415     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3416     return DISAS_NEXT;
3417 }
3418 
3419 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3420 {
3421     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3422     return DISAS_NEXT;
3423 }
3424 
3425 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3426 {
3427     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3428     return DISAS_NEXT;
3429 }
3430 
3431 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3432 {
3433     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3434     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3435     return DISAS_NEXT;
3436 }
3437 
3438 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3439 {
3440     TCGv_i64 r3 = load_freg(get_field(s, r3));
3441     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3442     return DISAS_NEXT;
3443 }
3444 
3445 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3446 {
3447     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3448     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3449     return DISAS_NEXT;
3450 }
3451 
3452 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3453 {
3454     TCGv_i64 r3 = load_freg(get_field(s, r3));
3455     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3456     return DISAS_NEXT;
3457 }
3458 
3459 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3460 {
3461     TCGv_i64 z = tcg_constant_i64(0);
3462     TCGv_i64 n = tcg_temp_new_i64();
3463 
3464     tcg_gen_neg_i64(n, o->in2);
3465     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3466     return DISAS_NEXT;
3467 }
3468 
3469 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3470 {
3471     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3472     return DISAS_NEXT;
3473 }
3474 
3475 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3476 {
3477     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3478     return DISAS_NEXT;
3479 }
3480 
3481 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3482 {
3483     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3484     tcg_gen_mov_i64(o->out2, o->in2);
3485     return DISAS_NEXT;
3486 }
3487 
3488 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3489 {
3490     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3491 
3492     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3493     set_cc_static(s);
3494     return DISAS_NEXT;
3495 }
3496 
3497 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3498 {
3499     tcg_gen_neg_i64(o->out, o->in2);
3500     return DISAS_NEXT;
3501 }
3502 
3503 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3504 {
3505     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3506     return DISAS_NEXT;
3507 }
3508 
3509 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3510 {
3511     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3512     return DISAS_NEXT;
3513 }
3514 
3515 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3516 {
3517     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3518     tcg_gen_mov_i64(o->out2, o->in2);
3519     return DISAS_NEXT;
3520 }
3521 
3522 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3523 {
3524     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3525 
3526     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3527     set_cc_static(s);
3528     return DISAS_NEXT;
3529 }
3530 
3531 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3532 {
3533     tcg_gen_or_i64(o->out, o->in1, o->in2);
3534     return DISAS_NEXT;
3535 }
3536 
3537 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3538 {
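    /* insn->data packs the width of the affected field (high byte)
       and its shift within the register (low byte). */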
3539     int shift = s->insn->data & 0xff;
3540     int size = s->insn->data >> 8;
3541     uint64_t mask = ((1ull << size) - 1) << shift;
3542     TCGv_i64 t = tcg_temp_new_i64();
3543 
3544     tcg_gen_shli_i64(t, o->in2, shift);
3545     tcg_gen_or_i64(o->out, o->in1, t);
3546 
3547     /* Produce the CC from only the bits manipulated.  */
3548     tcg_gen_andi_i64(cc_dst, o->out, mask);
3549     set_cc_nz_u64(s, cc_dst);
3550     return DISAS_NEXT;
3551 }
3552 
3553 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3554 {
3555     o->in1 = tcg_temp_new_i64();
3556 
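    /* Without the interlocked-access facility 2, fall back to a plain
       load/or/store sequence. */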
3557     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3558         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3559     } else {
3560         /* Perform the atomic operation in memory. */
3561         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3562                                     s->insn->data);
3563     }
3564 
3565     /* Recompute also for atomic case: needed for setting CC. */
3566     tcg_gen_or_i64(o->out, o->in1, o->in2);
3567 
3568     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3569         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3570     }
3571     return DISAS_NEXT;
3572 }
3573 
3574 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3575 {
3576     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3577 
3578     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3579     return DISAS_NEXT;
3580 }
3581 
3582 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3583 {
3584     int l2 = get_field(s, l2) + 1;
3585     TCGv_i32 l;
3586 
3587     /* The length must not exceed 32 bytes.  */
3588     if (l2 > 32) {
3589         gen_program_exception(s, PGM_SPECIFICATION);
3590         return DISAS_NORETURN;
3591     }
3592     l = tcg_constant_i32(l2);
3593     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3594     return DISAS_NEXT;
3595 }
3596 
3597 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3598 {
3599     int l2 = get_field(s, l2) + 1;
3600     TCGv_i32 l;
3601 
3602     /* The length must be even and must not exceed 64 bytes.  */
3603     if ((l2 & 1) || (l2 > 64)) {
3604         gen_program_exception(s, PGM_SPECIFICATION);
3605         return DISAS_NORETURN;
3606     }
3607     l = tcg_constant_i32(l2);
3608     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3609     return DISAS_NEXT;
3610 }
3611 
3612 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3613 {
3614     const uint8_t m3 = get_field(s, m3);
3615 
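    /* With MIE3 and m3 bit 8 set, count across all 64 bits at once;
       otherwise the helper produces a per-byte population count. */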
3616     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3617         tcg_gen_ctpop_i64(o->out, o->in2);
3618     } else {
3619         gen_helper_popcnt(o->out, o->in2);
3620     }
3621     return DISAS_NEXT;
3622 }
3623 
3624 #ifndef CONFIG_USER_ONLY
3625 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3626 {
3627     gen_helper_ptlb(cpu_env);
3628     return DISAS_NEXT;
3629 }
3630 #endif
3631 
3632 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3633 {
3634     int i3 = get_field(s, i3);
3635     int i4 = get_field(s, i4);
3636     int i5 = get_field(s, i5);
3637     int do_zero = i4 & 0x80;
3638     uint64_t mask, imask, pmask;
3639     int pos, len, rot;
3640 
3641     /* Adjust the arguments for the specific insn.  */
3642     switch (s->fields.op2) {
3643     case 0x55: /* risbg */
3644     case 0x59: /* risbgn */
3645         i3 &= 63;
3646         i4 &= 63;
3647         pmask = ~0;
3648         break;
3649     case 0x5d: /* risbhg */
3650         i3 &= 31;
3651         i4 &= 31;
3652         pmask = 0xffffffff00000000ull;
3653         break;
3654     case 0x51: /* risblg */
3655         i3 = (i3 & 31) + 32;
3656         i4 = (i4 & 31) + 32;
3657         pmask = 0x00000000ffffffffull;
3658         break;
3659     default:
3660         g_assert_not_reached();
3661     }
3662 
3663     /* MASK is the set of bits to be inserted from R2. */
3664     if (i3 <= i4) {
3665         /* [0...i3---i4...63] */
3666         mask = (-1ull >> i3) & (-1ull << (63 - i4));
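        /* e.g. i3 = 8, i4 = 15 -> mask = 0x00ff000000000000 */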
3667     } else {
3668         /* [0---i4...i3---63] */
3669         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3670     }
3671     /* For RISBLG/RISBHG, the wrapping is limited to the high/low word. */
3672     mask &= pmask;
3673 
3674     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3675        insns, we need to keep the other half of the register.  */
3676     imask = ~mask | ~pmask;
3677     if (do_zero) {
3678         imask = ~pmask;
3679     }
3680 
3681     len = i4 - i3 + 1;
3682     pos = 63 - i4;
3683     rot = i5 & 63;
3684 
3685     /* In some cases we can implement this with extract.  */
3686     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3687         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3688         return DISAS_NEXT;
3689     }
3690 
3691     /* In some cases we can implement this with deposit.  */
3692     if (len > 0 && (imask == 0 || ~mask == imask)) {
3693         /* Note that we rotate the bits to be inserted to the lsb, not to
3694            the position as described in the PoO.  */
3695         rot = (rot - pos) & 63;
3696     } else {
3697         pos = -1;
3698     }
3699 
3700     /* Rotate the input as necessary.  */
3701     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3702 
3703     /* Insert the selected bits into the output.  */
3704     if (pos >= 0) {
3705         if (imask == 0) {
3706             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3707         } else {
3708             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3709         }
3710     } else if (imask == 0) {
3711         tcg_gen_andi_i64(o->out, o->in2, mask);
3712     } else {
3713         tcg_gen_andi_i64(o->in2, o->in2, mask);
3714         tcg_gen_andi_i64(o->out, o->out, imask);
3715         tcg_gen_or_i64(o->out, o->out, o->in2);
3716     }
3717     return DISAS_NEXT;
3718 }
3719 
3720 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3721 {
3722     int i3 = get_field(s, i3);
3723     int i4 = get_field(s, i4);
3724     int i5 = get_field(s, i5);
3725     TCGv_i64 orig_out;
3726     uint64_t mask;
3727 
3728     /* If this is a test-only form, arrange to discard the result.  */
3729     if (i3 & 0x80) {
3730         tcg_debug_assert(o->out != NULL);
3731         orig_out = o->out;
3732         o->out = tcg_temp_new_i64();
3733         tcg_gen_mov_i64(o->out, orig_out);
3734     }
3735 
3736     i3 &= 63;
3737     i4 &= 63;
3738     i5 &= 63;
3739 
3740     /* MASK is the set of bits to be operated on from R2.
3741        Take care for I3/I4 wraparound.  */
3742     mask = ~0ull >> i3;
3743     if (i3 <= i4) {
3744         mask ^= ~0ull >> i4 >> 1;
3745     } else {
3746         mask |= ~(~0ull >> i4 >> 1);
3747     }
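    /* e.g. i3 = 8, i4 = 15 -> mask = 0x00ff000000000000 */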
3748 
3749     /* Rotate the input as necessary.  */
3750     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3751 
3752     /* Operate.  */
3753     switch (s->fields.op2) {
3754     case 0x54: /* AND */
3755         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3756         tcg_gen_and_i64(o->out, o->out, o->in2);
3757         break;
3758     case 0x56: /* OR */
3759         tcg_gen_andi_i64(o->in2, o->in2, mask);
3760         tcg_gen_or_i64(o->out, o->out, o->in2);
3761         break;
3762     case 0x57: /* XOR */
3763         tcg_gen_andi_i64(o->in2, o->in2, mask);
3764         tcg_gen_xor_i64(o->out, o->out, o->in2);
3765         break;
3766     default:
3767         abort();
3768     }
3769 
3770     /* Set the CC.  */
3771     tcg_gen_andi_i64(cc_dst, o->out, mask);
3772     set_cc_nz_u64(s, cc_dst);
3773     return DISAS_NEXT;
3774 }
3775 
3776 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3777 {
3778     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3779     return DISAS_NEXT;
3780 }
3781 
3782 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3783 {
3784     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3785     return DISAS_NEXT;
3786 }
3787 
3788 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3789 {
3790     tcg_gen_bswap64_i64(o->out, o->in2);
3791     return DISAS_NEXT;
3792 }
3793 
3794 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3795 {
3796     TCGv_i32 t1 = tcg_temp_new_i32();
3797     TCGv_i32 t2 = tcg_temp_new_i32();
3798     TCGv_i32 to = tcg_temp_new_i32();
3799     tcg_gen_extrl_i64_i32(t1, o->in1);
3800     tcg_gen_extrl_i64_i32(t2, o->in2);
3801     tcg_gen_rotl_i32(to, t1, t2);
3802     tcg_gen_extu_i32_i64(o->out, to);
3803     return DISAS_NEXT;
3804 }
3805 
3806 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3807 {
3808     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3809     return DISAS_NEXT;
3810 }
3811 
3812 #ifndef CONFIG_USER_ONLY
3813 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3814 {
3815     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3816     set_cc_static(s);
3817     return DISAS_NEXT;
3818 }
3819 
3820 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3821 {
3822     gen_helper_sacf(cpu_env, o->in2);
3823     /* Addressing mode has changed, so end the block.  */
3824     return DISAS_TOO_MANY;
3825 }
3826 #endif
3827 
3828 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3829 {
3830     int sam = s->insn->data;
3831     TCGv_i64 tsam;
3832     uint64_t mask;
3833 
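    /* insn->data gives the new mode: 0 = 24-bit, 1 = 31-bit, else 64-bit. */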
3834     switch (sam) {
3835     case 0:
3836         mask = 0xffffff;
3837         break;
3838     case 1:
3839         mask = 0x7fffffff;
3840         break;
3841     default:
3842         mask = -1;
3843         break;
3844     }
3845 
3846     /* Bizarre but true, we check the address of the current insn for the
3847        specification exception, not the next to be executed.  Thus the PoO
3848        documents that Bad Things Happen two bytes before the end.  */
3849     if (s->base.pc_next & ~mask) {
3850         gen_program_exception(s, PGM_SPECIFICATION);
3851         return DISAS_NORETURN;
3852     }
3853     s->pc_tmp &= mask;
3854 
3855     tsam = tcg_constant_i64(sam);
3856     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3857 
3858     /* Always exit the TB, since we (may have) changed execution mode.  */
3859     return DISAS_TOO_MANY;
3860 }
3861 
3862 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3863 {
3864     int r1 = get_field(s, r1);
3865     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3866     return DISAS_NEXT;
3867 }
3868 
3869 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3870 {
3871     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3872     return DISAS_NEXT;
3873 }
3874 
3875 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3876 {
3877     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3878     return DISAS_NEXT;
3879 }
3880 
3881 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3882 {
3883     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3884     return DISAS_NEXT;
3885 }
3886 
3887 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3888 {
3889     gen_helper_sqeb(o->out, cpu_env, o->in2);
3890     return DISAS_NEXT;
3891 }
3892 
3893 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3894 {
3895     gen_helper_sqdb(o->out, cpu_env, o->in2);
3896     return DISAS_NEXT;
3897 }
3898 
3899 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3900 {
3901     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3902     return DISAS_NEXT;
3903 }
3904 
3905 #ifndef CONFIG_USER_ONLY
3906 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3907 {
3908     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3909     set_cc_static(s);
3910     return DISAS_NEXT;
3911 }
3912 
3913 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3914 {
3915     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3916     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3917 
3918     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3919     set_cc_static(s);
3920     return DISAS_NEXT;
3921 }
3922 #endif
3923 
3924 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3925 {
3926     DisasCompare c;
3927     TCGv_i64 a, h;
3928     TCGLabel *lab;
3929     int r1;
3930 
3931     disas_jcc(s, &c, get_field(s, m3));
3932 
3933     /* We want to store when the condition is fulfilled, so branch
3934        out when it's not.  */
3935     c.cond = tcg_invert_cond(c.cond);
3936 
3937     lab = gen_new_label();
3938     if (c.is_64) {
3939         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3940     } else {
3941         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3942     }
3943 
3944     r1 = get_field(s, r1);
3945     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3946     switch (s->insn->data) {
3947     case 1: /* STOCG */
3948         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3949         break;
3950     case 0: /* STOC */
3951         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3952         break;
3953     case 2: /* STOCFH */
3954         h = tcg_temp_new_i64();
3955         tcg_gen_shri_i64(h, regs[r1], 32);
3956         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3957         break;
3958     default:
3959         g_assert_not_reached();
3960     }
3961 
3962     gen_set_label(lab);
3963     return DISAS_NEXT;
3964 }
3965 
3966 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3967 {
3968     TCGv_i64 t;
3969     uint64_t sign = 1ull << s->insn->data;
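    /* insn->data is the sign-bit position: 31 for the 32-bit forms,
       63 for the 64-bit form. */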
3970     if (s->insn->data == 31) {
3971         t = tcg_temp_new_i64();
3972         tcg_gen_shli_i64(t, o->in1, 32);
3973     } else {
3974         t = o->in1;
3975     }
3976     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3977     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3978     /* The arithmetic left shift is curious in that it does not affect
3979        the sign bit.  Copy that over from the source unchanged.  */
3980     tcg_gen_andi_i64(o->out, o->out, ~sign);
3981     tcg_gen_andi_i64(o->in1, o->in1, sign);
3982     tcg_gen_or_i64(o->out, o->out, o->in1);
3983     return DISAS_NEXT;
3984 }
3985 
3986 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3987 {
3988     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3989     return DISAS_NEXT;
3990 }
3991 
3992 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3993 {
3994     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3995     return DISAS_NEXT;
3996 }
3997 
3998 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
3999 {
4000     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4001     return DISAS_NEXT;
4002 }
4003 
4004 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4005 {
4006     gen_helper_sfpc(cpu_env, o->in2);
4007     return DISAS_NEXT;
4008 }
4009 
4010 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4011 {
4012     gen_helper_sfas(cpu_env, o->in2);
4013     return DISAS_NEXT;
4014 }
4015 
4016 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4017 {
4018     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4019     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4020     gen_helper_srnm(cpu_env, o->addr1);
4021     return DISAS_NEXT;
4022 }
4023 
4024 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4025 {
4026     /* Bits 0-55 are ignored. */
4027     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4028     gen_helper_srnm(cpu_env, o->addr1);
4029     return DISAS_NEXT;
4030 }
4031 
4032 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4033 {
4034     TCGv_i64 tmp = tcg_temp_new_i64();
4035 
4036     /* Bits other than 61-63 are ignored. */
4037     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4038 
4039     /* No need to call a helper, we don't implement DFP.  */
4040     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4041     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4042     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4043     return DISAS_NEXT;
4044 }
4045 
4046 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4047 {
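    /* SET PROGRAM MASK: bits 2-3 of r1's low word become the CC,
       bits 4-7 the program mask. */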
4048     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4049     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4050     set_cc_static(s);
4051 
4052     tcg_gen_shri_i64(o->in1, o->in1, 24);
4053     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4054     return DISAS_NEXT;
4055 }
4056 
4057 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4058 {
4059     int b1 = get_field(s, b1);
4060     int d1 = get_field(s, d1);
4061     int b2 = get_field(s, b2);
4062     int d2 = get_field(s, d2);
4063     int r3 = get_field(s, r3);
4064     TCGv_i64 tmp = tcg_temp_new_i64();
4065 
4066     /* fetch all operands first */
4067     o->in1 = tcg_temp_new_i64();
4068     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4069     o->in2 = tcg_temp_new_i64();
4070     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4071     o->addr1 = tcg_temp_new_i64();
4072     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4073 
4074     /* load the third operand into r3 before modifying anything */
4075     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4076 
4077     /* subtract CPU timer from first operand and store in GR0 */
4078     gen_helper_stpt(tmp, cpu_env);
4079     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4080 
4081     /* store second operand in GR1 */
4082     tcg_gen_mov_i64(regs[1], o->in2);
4083     return DISAS_NEXT;
4084 }
4085 
4086 #ifndef CONFIG_USER_ONLY
4087 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4088 {
4089     tcg_gen_shri_i64(o->in2, o->in2, 4);
4090     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4091     return DISAS_NEXT;
4092 }
4093 
4094 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4095 {
4096     gen_helper_sske(cpu_env, o->in1, o->in2);
4097     return DISAS_NEXT;
4098 }
4099 
4100 static void gen_check_psw_mask(DisasContext *s)
4101 {
4102     TCGv_i64 reserved = tcg_temp_new_i64();
4103     TCGLabel *ok = gen_new_label();
4104 
4105     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4106     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4107     gen_program_exception(s, PGM_SPECIFICATION);
4108     gen_set_label(ok);
4109 }
4110 
4111 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4112 {
4113     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4114 
4115     gen_check_psw_mask(s);
4116 
4117     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4118     s->exit_to_mainloop = true;
4119     return DISAS_TOO_MANY;
4120 }
4121 
4122 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4123 {
4124     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4125     return DISAS_NEXT;
4126 }
4127 #endif
4128 
4129 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4130 {
4131     gen_helper_stck(o->out, cpu_env);
4132     /* ??? We don't implement clock states.  */
4133     gen_op_movi_cc(s, 0);
4134     return DISAS_NEXT;
4135 }
4136 
4137 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4138 {
4139     TCGv_i64 c1 = tcg_temp_new_i64();
4140     TCGv_i64 c2 = tcg_temp_new_i64();
4141     TCGv_i64 todpr = tcg_temp_new_i64();
4142     gen_helper_stck(c1, cpu_env);
4143     /* 16-bit value stored in a uint32_t (only valid bits set) */
4144     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4145     /* Shift the 64-bit value into its place as a zero-extended
4146        104-bit value.  Note that "bit positions 64-103 are always
4147        non-zero so that they compare differently to STCK"; we set
4148        the least significant bit to 1.  */
4149     tcg_gen_shli_i64(c2, c1, 56);
4150     tcg_gen_shri_i64(c1, c1, 8);
4151     tcg_gen_ori_i64(c2, c2, 0x10000);
4152     tcg_gen_or_i64(c2, c2, todpr);
4153     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4154     tcg_gen_addi_i64(o->in2, o->in2, 8);
4155     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4156     /* ??? We don't implement clock states.  */
4157     gen_op_movi_cc(s, 0);
4158     return DISAS_NEXT;
4159 }
4160 
4161 #ifndef CONFIG_USER_ONLY
4162 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4163 {
4164     gen_helper_sck(cc_op, cpu_env, o->in2);
4165     set_cc_static(s);
4166     return DISAS_NEXT;
4167 }
4168 
4169 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4170 {
4171     gen_helper_sckc(cpu_env, o->in2);
4172     return DISAS_NEXT;
4173 }
4174 
4175 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4176 {
4177     gen_helper_sckpf(cpu_env, regs[0]);
4178     return DISAS_NEXT;
4179 }
4180 
4181 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4182 {
4183     gen_helper_stckc(o->out, cpu_env);
4184     return DISAS_NEXT;
4185 }
4186 
4187 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4188 {
4189     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4190     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4191 
4192     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4193     return DISAS_NEXT;
4194 }
4195 
4196 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4197 {
4198     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4199     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4200 
4201     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4202     return DISAS_NEXT;
4203 }
4204 
4205 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4206 {
4207     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4208     return DISAS_NEXT;
4209 }
4210 
4211 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4212 {
4213     gen_helper_spt(cpu_env, o->in2);
4214     return DISAS_NEXT;
4215 }
4216 
4217 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4218 {
4219     gen_helper_stfl(cpu_env);
4220     return DISAS_NEXT;
4221 }
4222 
4223 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4224 {
4225     gen_helper_stpt(o->out, cpu_env);
4226     return DISAS_NEXT;
4227 }
4228 
4229 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4230 {
4231     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4232     set_cc_static(s);
4233     return DISAS_NEXT;
4234 }
4235 
4236 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4237 {
4238     gen_helper_spx(cpu_env, o->in2);
4239     return DISAS_NEXT;
4240 }
4241 
4242 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4243 {
4244     gen_helper_xsch(cpu_env, regs[1]);
4245     set_cc_static(s);
4246     return DISAS_NEXT;
4247 }
4248 
4249 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4250 {
4251     gen_helper_csch(cpu_env, regs[1]);
4252     set_cc_static(s);
4253     return DISAS_NEXT;
4254 }
4255 
4256 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4257 {
4258     gen_helper_hsch(cpu_env, regs[1]);
4259     set_cc_static(s);
4260     return DISAS_NEXT;
4261 }
4262 
4263 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4264 {
4265     gen_helper_msch(cpu_env, regs[1], o->in2);
4266     set_cc_static(s);
4267     return DISAS_NEXT;
4268 }
4269 
4270 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4271 {
4272     gen_helper_rchp(cpu_env, regs[1]);
4273     set_cc_static(s);
4274     return DISAS_NEXT;
4275 }
4276 
4277 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4278 {
4279     gen_helper_rsch(cpu_env, regs[1]);
4280     set_cc_static(s);
4281     return DISAS_NEXT;
4282 }
4283 
4284 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4285 {
4286     gen_helper_sal(cpu_env, regs[1]);
4287     return DISAS_NEXT;
4288 }
4289 
4290 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4291 {
4292     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4293     return DISAS_NEXT;
4294 }
4295 
4296 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4297 {
4298     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4299     gen_op_movi_cc(s, 3);
4300     return DISAS_NEXT;
4301 }
4302 
4303 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4304 {
4305     /* The instruction is suppressed if not provided. */
4306     return DISAS_NEXT;
4307 }
4308 
4309 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4310 {
4311     gen_helper_ssch(cpu_env, regs[1], o->in2);
4312     set_cc_static(s);
4313     return DISAS_NEXT;
4314 }
4315 
4316 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4317 {
4318     gen_helper_stsch(cpu_env, regs[1], o->in2);
4319     set_cc_static(s);
4320     return DISAS_NEXT;
4321 }
4322 
4323 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4324 {
4325     gen_helper_stcrw(cpu_env, o->in2);
4326     set_cc_static(s);
4327     return DISAS_NEXT;
4328 }
4329 
4330 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4331 {
4332     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4333     set_cc_static(s);
4334     return DISAS_NEXT;
4335 }
4336 
4337 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4338 {
4339     gen_helper_tsch(cpu_env, regs[1], o->in2);
4340     set_cc_static(s);
4341     return DISAS_NEXT;
4342 }
4343 
4344 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4345 {
4346     gen_helper_chsc(cpu_env, o->in2);
4347     set_cc_static(s);
4348     return DISAS_NEXT;
4349 }
4350 
4351 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4352 {
4353     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4354     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4355     return DISAS_NEXT;
4356 }
4357 
4358 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4359 {
4360     uint64_t i2 = get_field(s, i2);
4361     TCGv_i64 t;
4362 
4363     /* It is important to do what the instruction name says: STORE THEN.
4364        If we let the output hook perform the store, a fault and restart
4365        would leave the wrong SYSTEM MASK in place.  */
4366     t = tcg_temp_new_i64();
4367     tcg_gen_shri_i64(t, psw_mask, 56);
4368     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4369 
4370     if (s->fields.op == 0xac) {
4371         tcg_gen_andi_i64(psw_mask, psw_mask,
4372                          (i2 << 56) | 0x00ffffffffffffffull);
4373     } else {
4374         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4375     }
4376 
4377     gen_check_psw_mask(s);
4378 
4379     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4380     s->exit_to_mainloop = true;
4381     return DISAS_TOO_MANY;
4382 }
4383 
4384 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4385 {
4386     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4387 
4388     if (s->base.tb->flags & FLAG_MASK_PER) {
4389         update_psw_addr(s);
4390         gen_helper_per_store_real(cpu_env);
4391     }
4392     return DISAS_NEXT;
4393 }
4394 #endif
4395 
4396 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4397 {
4398     gen_helper_stfle(cc_op, cpu_env, o->in2);
4399     set_cc_static(s);
4400     return DISAS_NEXT;
4401 }
4402 
4403 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4404 {
4405     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4406     return DISAS_NEXT;
4407 }
4408 
4409 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4410 {
4411     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4412     return DISAS_NEXT;
4413 }
4414 
4415 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4416 {
4417     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4418                        MO_TEUL | s->insn->data);
4419     return DISAS_NEXT;
4420 }
4421 
4422 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4423 {
4424     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4425                         MO_TEUQ | s->insn->data);
4426     return DISAS_NEXT;
4427 }
4428 
4429 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4430 {
4431     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4432     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4433 
4434     gen_helper_stam(cpu_env, r1, o->in2, r3);
4435     return DISAS_NEXT;
4436 }
4437 
4438 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4439 {
4440     int m3 = get_field(s, m3);
4441     int pos, base = s->insn->data;
4442     TCGv_i64 tmp = tcg_temp_new_i64();
4443 
4444     pos = base + ctz32(m3) * 8;
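    /* pos is the bit offset of the least significant selected byte. */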
4445     switch (m3) {
4446     case 0xf:
4447         /* Effectively a 32-bit store.  */
4448         tcg_gen_shri_i64(tmp, o->in1, pos);
4449         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4450         break;
4451 
4452     case 0xc:
4453     case 0x6:
4454     case 0x3:
4455         /* Effectively a 16-bit store.  */
4456         tcg_gen_shri_i64(tmp, o->in1, pos);
4457         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4458         break;
4459 
4460     case 0x8:
4461     case 0x4:
4462     case 0x2:
4463     case 0x1:
4464         /* Effectively an 8-bit store.  */
4465         tcg_gen_shri_i64(tmp, o->in1, pos);
4466         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4467         break;
4468 
4469     default:
4470         /* This is going to be a sequence of shifts and stores.  */
4471         pos = base + 32 - 8;
4472         while (m3) {
4473             if (m3 & 0x8) {
4474                 tcg_gen_shri_i64(tmp, o->in1, pos);
4475                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4476                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4477             }
4478             m3 = (m3 << 1) & 0xf;
4479             pos -= 8;
4480         }
4481         break;
4482     }
4483     return DISAS_NEXT;
4484 }
4485 
4486 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4487 {
4488     int r1 = get_field(s, r1);
4489     int r3 = get_field(s, r3);
4490     int size = s->insn->data;
4491     TCGv_i64 tsize = tcg_constant_i64(size);
4492 
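    /* Store registers r1 through r3 inclusive, wrapping from 15 back to 0. */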
4493     while (1) {
4494         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4495                             size == 8 ? MO_TEUQ : MO_TEUL);
4496         if (r1 == r3) {
4497             break;
4498         }
4499         tcg_gen_add_i64(o->in2, o->in2, tsize);
4500         r1 = (r1 + 1) & 15;
4501     }
4502 
4503     return DISAS_NEXT;
4504 }
4505 
4506 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4507 {
4508     int r1 = get_field(s, r1);
4509     int r3 = get_field(s, r3);
4510     TCGv_i64 t = tcg_temp_new_i64();
4511     TCGv_i64 t4 = tcg_constant_i64(4);
4512     TCGv_i64 t32 = tcg_constant_i64(32);
4513 
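    /* Store the high 32 bits of registers r1 through r3, wrapping at 15. */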
4514     while (1) {
4515         tcg_gen_shr_i64(t, regs[r1], t32);
4516         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4517         if (r1 == r3) {
4518             break;
4519         }
4520         tcg_gen_add_i64(o->in2, o->in2, t4);
4521         r1 = (r1 + 1) & 15;
4522     }
4523     return DISAS_NEXT;
4524 }
4525 
4526 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4527 {
4528     TCGv_i128 t16 = tcg_temp_new_i128();
4529 
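    /* out is r1 (the high half), out2 is r1 + 1 (the low half); store
       them as a single aligned 16-byte quantity. */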
4530     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4531     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4532                          MO_TE | MO_128 | MO_ALIGN);
4533     return DISAS_NEXT;
4534 }
4535 
4536 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4537 {
4538     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4539     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4540 
4541     gen_helper_srst(cpu_env, r1, r2);
4542     set_cc_static(s);
4543     return DISAS_NEXT;
4544 }
4545 
4546 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4547 {
4548     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4549     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4550 
4551     gen_helper_srstu(cpu_env, r1, r2);
4552     set_cc_static(s);
4553     return DISAS_NEXT;
4554 }
4555 
4556 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4557 {
4558     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4559     return DISAS_NEXT;
4560 }
4561 
4562 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4563 {
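    /* 64-bit logical subtract; the borrow-out lands in cc_src as 0 or -1. */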
4564     tcg_gen_movi_i64(cc_src, 0);
4565     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4566     return DISAS_NEXT;
4567 }
4568 
4569 /* Compute borrow (0, -1) into cc_src. */
4570 static void compute_borrow(DisasContext *s)
4571 {
4572     switch (s->cc_op) {
4573     case CC_OP_SUBU:
4574         /* The borrow value is already in cc_src (0,-1). */
4575         break;
4576     default:
4577         gen_op_calc_cc(s);
4578         /* fall through */
4579     case CC_OP_STATIC:
4580         /* The carry flag is the msb of CC; compute into cc_src. */
4581         tcg_gen_extu_i32_i64(cc_src, cc_op);
4582         tcg_gen_shri_i64(cc_src, cc_src, 1);
4583         /* fall through */
4584     case CC_OP_ADDU:
4585         /* Convert carry (1,0) to borrow (0,-1). */
4586         tcg_gen_subi_i64(cc_src, cc_src, 1);
4587         break;
4588     }
4589 }
4590 
4591 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4592 {
4593     compute_borrow(s);
4594 
4595     /* Borrow is {0, -1}, so add to subtract. */
4596     tcg_gen_add_i64(o->out, o->in1, cc_src);
4597     tcg_gen_sub_i64(o->out, o->out, o->in2);
4598     return DISAS_NEXT;
4599 }
4600 
4601 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4602 {
4603     compute_borrow(s);
4604 
4605     /*
4606      * Borrow is {0, -1}, so add to subtract; replicate the
4607      * borrow input to produce 128-bit -1 for the addition.
4608      */
4609     TCGv_i64 zero = tcg_constant_i64(0);
4610     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4611     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4612 
4613     return DISAS_NEXT;
4614 }
4615 
4616 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4617 {
4618     TCGv_i32 t;
4619 
4620     update_psw_addr(s);
4621     update_cc_op(s);
4622 
4623     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4624     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4625 
4626     t = tcg_constant_i32(s->ilen);
4627     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4628 
4629     gen_exception(EXCP_SVC);
4630     return DISAS_NORETURN;
4631 }
4632 
4633 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4634 {
4635     int cc = 0;
4636 
4637     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4638     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4639     gen_op_movi_cc(s, cc);
4640     return DISAS_NEXT;
4641 }
4642 
4643 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4644 {
4645     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4646     set_cc_static(s);
4647     return DISAS_NEXT;
4648 }
4649 
4650 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4651 {
4652     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4653     set_cc_static(s);
4654     return DISAS_NEXT;
4655 }
4656 
4657 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4658 {
4659     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4660     set_cc_static(s);
4661     return DISAS_NEXT;
4662 }
4663 
4664 #ifndef CONFIG_USER_ONLY
4665 
4666 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4667 {
4668     gen_helper_testblock(cc_op, cpu_env, o->in2);
4669     set_cc_static(s);
4670     return DISAS_NEXT;
4671 }
4672 
4673 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4674 {
4675     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4676     set_cc_static(s);
4677     return DISAS_NEXT;
4678 }
4679 
4680 #endif
4681 
4682 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4683 {
4684     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4685 
4686     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4687     set_cc_static(s);
4688     return DISAS_NEXT;
4689 }
4690 
4691 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4692 {
4693     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4694 
4695     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4696     set_cc_static(s);
4697     return DISAS_NEXT;
4698 }
4699 
4700 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4701 {
4702     TCGv_i128 pair = tcg_temp_new_i128();
4703 
4704     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4705     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4706     set_cc_static(s);
4707     return DISAS_NEXT;
4708 }
4709 
4710 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4711 {
4712     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4713 
4714     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4715     set_cc_static(s);
4716     return DISAS_NEXT;
4717 }
4718 
4719 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4720 {
4721     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4722 
4723     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4724     set_cc_static(s);
4725     return DISAS_NEXT;
4726 }
4727 
4728 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4729 {
4730     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4731     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4732     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4733     TCGv_i32 tst = tcg_temp_new_i32();
4734     int m3 = get_field(s, m3);
4735 
4736     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4737         m3 = 0;
4738     }
4739     if (m3 & 1) {
4740         tcg_gen_movi_i32(tst, -1);
4741     } else {
4742         tcg_gen_extrl_i64_i32(tst, regs[0]);
4743         if (s->insn->opc & 3) {
4744             tcg_gen_ext8u_i32(tst, tst);
4745         } else {
4746             tcg_gen_ext16u_i32(tst, tst);
4747         }
4748     }
4749     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4750 
4751     set_cc_static(s);
4752     return DISAS_NEXT;
4753 }
4754 
4755 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4756 {
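    /* TEST AND SET: atomically exchange 0xff into the byte; the CC is
       the leftmost bit of the old value. */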
4757     TCGv_i32 t1 = tcg_constant_i32(0xff);
4758 
4759     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4760     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4761     set_cc_static(s);
4762     return DISAS_NEXT;
4763 }
4764 
4765 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4766 {
4767     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4768 
4769     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4770     return DISAS_NEXT;
4771 }
4772 
4773 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4774 {
4775     int l1 = get_field(s, l1) + 1;
4776     TCGv_i32 l;
4777 
4778     /* The length must not exceed 32 bytes.  */
4779     if (l1 > 32) {
4780         gen_program_exception(s, PGM_SPECIFICATION);
4781         return DISAS_NORETURN;
4782     }
4783     l = tcg_constant_i32(l1);
4784     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4785     set_cc_static(s);
4786     return DISAS_NEXT;
4787 }
4788 
4789 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4790 {
4791     int l1 = get_field(s, l1) + 1;
4792     TCGv_i32 l;
4793 
4794     /* The length must be even and must not exceed 64 bytes.  */
4795     if ((l1 & 1) || (l1 > 64)) {
4796         gen_program_exception(s, PGM_SPECIFICATION);
4797         return DISAS_NORETURN;
4798     }
4799     l = tcg_constant_i32(l1);
4800     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4801     set_cc_static(s);
4802     return DISAS_NEXT;
4803 }
4804 
4806 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4807 {
4808     int d1 = get_field(s, d1);
4809     int d2 = get_field(s, d2);
4810     int b1 = get_field(s, b1);
4811     int b2 = get_field(s, b2);
4812     int l = get_field(s, l1);
4813     TCGv_i32 t32;
4814 
4815     o->addr1 = get_address(s, 0, b1, d1);
4816 
4817     /* If the addresses are identical, this is a store/memset of zero.  */
4818     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4819         o->in2 = tcg_constant_i64(0);
4820 
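        /* Convert the length-minus-one encoding into a byte count. */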
4821         l++;
4822         while (l >= 8) {
4823             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4824             l -= 8;
4825             if (l > 0) {
4826                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4827             }
4828         }
4829         if (l >= 4) {
4830             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4831             l -= 4;
4832             if (l > 0) {
4833                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4834             }
4835         }
4836         if (l >= 2) {
4837             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4838             l -= 2;
4839             if (l > 0) {
4840                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4841             }
4842         }
4843         if (l) {
4844             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4845         }
4846         gen_op_movi_cc(s, 0);
4847         return DISAS_NEXT;
4848     }
4849 
4850     /* But in general we'll defer to a helper.  */
4851     o->in2 = get_address(s, 0, b2, d2);
4852     t32 = tcg_constant_i32(l);
4853     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4854     set_cc_static(s);
4855     return DISAS_NEXT;
4856 }
4857 
4858 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4859 {
4860     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4861     return DISAS_NEXT;
4862 }
4863 
4864 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4865 {
4866     int shift = s->insn->data & 0xff;
4867     int size = s->insn->data >> 8;
4868     uint64_t mask = ((1ull << size) - 1) << shift;
4869     TCGv_i64 t = tcg_temp_new_i64();
4870 
4871     tcg_gen_shli_i64(t, o->in2, shift);
4872     tcg_gen_xor_i64(o->out, o->in1, t);
4873 
4874     /* Produce the CC from only the bits manipulated.  */
4875     tcg_gen_andi_i64(cc_dst, o->out, mask);
4876     set_cc_nz_u64(s, cc_dst);
4877     return DISAS_NEXT;
4878 }
4879 
4880 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4881 {
4882     o->in1 = tcg_temp_new_i64();
4883 
4884     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4885         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4886     } else {
4887         /* Perform the atomic operation in memory. */
4888         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4889                                      s->insn->data);
4890     }
4891 
4892     /* Recompute also for atomic case: needed for setting CC. */
4893     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4894 
4895     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4896         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4897     }
4898     return DISAS_NEXT;
4899 }
4900 
4901 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4902 {
4903     o->out = tcg_constant_i64(0);
4904     return DISAS_NEXT;
4905 }
4906 
4907 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4908 {
4909     o->out = tcg_constant_i64(0);
4910     o->out2 = o->out;
4911     return DISAS_NEXT;
4912 }
4913 
4914 #ifndef CONFIG_USER_ONLY
4915 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4916 {
4917     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4918 
4919     gen_helper_clp(cpu_env, r2);
4920     set_cc_static(s);
4921     return DISAS_NEXT;
4922 }
4923 
4924 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4925 {
4926     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4927     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4928 
4929     gen_helper_pcilg(cpu_env, r1, r2);
4930     set_cc_static(s);
4931     return DISAS_NEXT;
4932 }
4933 
4934 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4935 {
4936     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4937     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4938 
4939     gen_helper_pcistg(cpu_env, r1, r2);
4940     set_cc_static(s);
4941     return DISAS_NEXT;
4942 }
4943 
4944 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4945 {
4946     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4947     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4948 
4949     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4950     set_cc_static(s);
4951     return DISAS_NEXT;
4952 }
4953 
4954 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4955 {
4956     gen_helper_sic(cpu_env, o->in1, o->in2);
4957     return DISAS_NEXT;
4958 }
4959 
4960 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4961 {
4962     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4963     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4964 
4965     gen_helper_rpcit(cpu_env, r1, r2);
4966     set_cc_static(s);
4967     return DISAS_NEXT;
4968 }
4969 
4970 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4971 {
4972     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4973     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4974     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4975 
4976     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4977     set_cc_static(s);
4978     return DISAS_NEXT;
4979 }
4980 
4981 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4982 {
4983     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4984     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4985 
4986     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4987     set_cc_static(s);
4988     return DISAS_NEXT;
4989 }
4990 #endif
4991 
4992 #include "translate_vx.c.inc"
4993 
4994 /* ====================================================================== */
4995 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4996    the original inputs), update the various cc data structures in order to
4997    be able to compute the new condition code.  */
4998 
4999 static void cout_abs32(DisasContext *s, DisasOps *o)
5000 {
5001     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5002 }
5003 
5004 static void cout_abs64(DisasContext *s, DisasOps *o)
5005 {
5006     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5007 }
5008 
5009 static void cout_adds32(DisasContext *s, DisasOps *o)
5010 {
5011     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5012 }
5013 
5014 static void cout_adds64(DisasContext *s, DisasOps *o)
5015 {
5016     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5017 }
5018 
5019 static void cout_addu32(DisasContext *s, DisasOps *o)
5020 {
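    /* Split the 64-bit sum: carry into cc_src, low 32 bits into cc_dst. */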
5021     tcg_gen_shri_i64(cc_src, o->out, 32);
5022     tcg_gen_ext32u_i64(cc_dst, o->out);
5023     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5024 }
5025 
5026 static void cout_addu64(DisasContext *s, DisasOps *o)
5027 {
5028     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5029 }
5030 
5031 static void cout_cmps32(DisasContext *s, DisasOps *o)
5032 {
5033     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5034 }
5035 
5036 static void cout_cmps64(DisasContext *s, DisasOps *o)
5037 {
5038     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5039 }
5040 
5041 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5042 {
5043     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5044 }
5045 
5046 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5047 {
5048     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5049 }
5050 
5051 static void cout_f32(DisasContext *s, DisasOps *o)
5052 {
5053     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5054 }
5055 
5056 static void cout_f64(DisasContext *s, DisasOps *o)
5057 {
5058     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5059 }
5060 
5061 static void cout_f128(DisasContext *s, DisasOps *o)
5062 {
5063     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5064 }
5065 
5066 static void cout_nabs32(DisasContext *s, DisasOps *o)
5067 {
5068     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5069 }
5070 
5071 static void cout_nabs64(DisasContext *s, DisasOps *o)
5072 {
5073     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5074 }
5075 
5076 static void cout_neg32(DisasContext *s, DisasOps *o)
5077 {
5078     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5079 }
5080 
5081 static void cout_neg64(DisasContext *s, DisasOps *o)
5082 {
5083     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5084 }
5085 
5086 static void cout_nz32(DisasContext *s, DisasOps *o)
5087 {
5088     tcg_gen_ext32u_i64(cc_dst, o->out);
5089     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5090 }
5091 
5092 static void cout_nz64(DisasContext *s, DisasOps *o)
5093 {
5094     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5095 }
5096 
5097 static void cout_s32(DisasContext *s, DisasOps *o)
5098 {
5099     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5100 }
5101 
5102 static void cout_s64(DisasContext *s, DisasOps *o)
5103 {
5104     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5105 }
5106 
5107 static void cout_subs32(DisasContext *s, DisasOps *o)
5108 {
5109     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5110 }
5111 
5112 static void cout_subs64(DisasContext *s, DisasOps *o)
5113 {
5114     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5115 }
5116 
5117 static void cout_subu32(DisasContext *s, DisasOps *o)
5118 {
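    /* The borrow is the sign of the 64-bit difference: 0 or -1 in cc_src. */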
5119     tcg_gen_sari_i64(cc_src, o->out, 32);
5120     tcg_gen_ext32u_i64(cc_dst, o->out);
5121     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5122 }
5123 
5124 static void cout_subu64(DisasContext *s, DisasOps *o)
5125 {
5126     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5127 }
5128 
5129 static void cout_tm32(DisasContext *s, DisasOps *o)
5130 {
5131     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5132 }
5133 
5134 static void cout_tm64(DisasContext *s, DisasOps *o)
5135 {
5136     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5137 }
5138 
5139 static void cout_muls32(DisasContext *s, DisasOps *o)
5140 {
5141     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5142 }
5143 
5144 static void cout_muls64(DisasContext *s, DisasOps *o)
5145 {
5146     /* out contains the "high" part, out2 the "low" part of the 128-bit result */
5147     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5148 }
5149 
5150 /* ====================================================================== */
5151 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5152    with the TCG register to which we will write.  Used in combination with
5153    the "wout" generators, in some cases we need a new temporary, and in
5154    some cases we can write to a TCG global.  */
5155 
5156 static void prep_new(DisasContext *s, DisasOps *o)
5157 {
5158     o->out = tcg_temp_new_i64();
5159 }
5160 #define SPEC_prep_new 0
5161 
5162 static void prep_new_P(DisasContext *s, DisasOps *o)
5163 {
5164     o->out = tcg_temp_new_i64();
5165     o->out2 = tcg_temp_new_i64();
5166 }
5167 #define SPEC_prep_new_P 0
5168 
5169 static void prep_new_x(DisasContext *s, DisasOps *o)
5170 {
5171     o->out_128 = tcg_temp_new_i128();
5172 }
5173 #define SPEC_prep_new_x 0
5174 
5175 static void prep_r1(DisasContext *s, DisasOps *o)
5176 {
5177     o->out = regs[get_field(s, r1)];
5178 }
5179 #define SPEC_prep_r1 0
5180 
5181 static void prep_r1_P(DisasContext *s, DisasOps *o)
5182 {
5183     int r1 = get_field(s, r1);
5184     o->out = regs[r1];
5185     o->out2 = regs[r1 + 1];
5186 }
5187 #define SPEC_prep_r1_P SPEC_r1_even
5188 
5189 static void prep_x1(DisasContext *s, DisasOps *o)
5190 {
5191     o->out_128 = load_freg_128(get_field(s, r1));
5192 }
5193 #define SPEC_prep_x1 SPEC_r1_f128
5194 
5195 /* ====================================================================== */
5196 /* The "Write OUTput" generators.  These generally perform some non-trivial
5197    copy of data to TCG globals, or to main memory.  The trivial cases are
5198    generally handled by having a "prep" generator install the TCG global
5199    as the destination of the operation.  */
5200 
5201 static void wout_r1(DisasContext *s, DisasOps *o)
5202 {
5203     store_reg(get_field(s, r1), o->out);
5204 }
5205 #define SPEC_wout_r1 0
5206 
5207 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5208 {
5209     store_reg(get_field(s, r1), o->out2);
5210 }
5211 #define SPEC_wout_out2_r1 0
5212 
5213 static void wout_r1_8(DisasContext *s, DisasOps *o)
5214 {
5215     int r1 = get_field(s, r1);
5216     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5217 }
5218 #define SPEC_wout_r1_8 0
5219 
5220 static void wout_r1_16(DisasContext *s, DisasOps *o)
5221 {
5222     int r1 = get_field(s, r1);
5223     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5224 }
5225 #define SPEC_wout_r1_16 0
5226 
5227 static void wout_r1_32(DisasContext *s, DisasOps *o)
5228 {
5229     store_reg32_i64(get_field(s, r1), o->out);
5230 }
5231 #define SPEC_wout_r1_32 0
5232 
5233 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5234 {
5235     store_reg32h_i64(get_field(s, r1), o->out);
5236 }
5237 #define SPEC_wout_r1_32h 0
5238 
5239 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5240 {
5241     int r1 = get_field(s, r1);
5242     store_reg32_i64(r1, o->out);
5243     store_reg32_i64(r1 + 1, o->out2);
5244 }
5245 #define SPEC_wout_r1_P32 SPEC_r1_even
5246 
5247 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5248 {
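    /* Split the 64-bit output across the even/odd pair: high half to r1,
       low half to r1 + 1. */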
5249     int r1 = get_field(s, r1);
5250     TCGv_i64 t = tcg_temp_new_i64();
5251     store_reg32_i64(r1 + 1, o->out);
5252     tcg_gen_shri_i64(t, o->out, 32);
5253     store_reg32_i64(r1, t);
5254 }
5255 #define SPEC_wout_r1_D32 SPEC_r1_even
5256 
5257 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5258 {
5259     int r1 = get_field(s, r1);
5260     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5261 }
5262 #define SPEC_wout_r1_D64 SPEC_r1_even
5263 
5264 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5265 {
5266     int r3 = get_field(s, r3);
5267     store_reg32_i64(r3, o->out);
5268     store_reg32_i64(r3 + 1, o->out2);
5269 }
5270 #define SPEC_wout_r3_P32 SPEC_r3_even
5271 
5272 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5273 {
5274     int r3 = get_field(s, r3);
5275     store_reg(r3, o->out);
5276     store_reg(r3 + 1, o->out2);
5277 }
5278 #define SPEC_wout_r3_P64 SPEC_r3_even
5279 
5280 static void wout_e1(DisasContext *s, DisasOps *o)
5281 {
5282     store_freg32_i64(get_field(s, r1), o->out);
5283 }
5284 #define SPEC_wout_e1 0
5285 
5286 static void wout_f1(DisasContext *s, DisasOps *o)
5287 {
5288     store_freg(get_field(s, r1), o->out);
5289 }
5290 #define SPEC_wout_f1 0
5291 
5292 static void wout_x1(DisasContext *s, DisasOps *o)
5293 {
5294     int f1 = get_field(s, r1);
5295 
5296     /* Split out_128 into out+out2 for cout_f128. */
5297     tcg_debug_assert(o->out == NULL);
5298     o->out = tcg_temp_new_i64();
5299     o->out2 = tcg_temp_new_i64();
5300 
5301     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5302     store_freg(f1, o->out);
5303     store_freg(f1 + 2, o->out2);
5304 }
5305 #define SPEC_wout_x1 SPEC_r1_f128
5306 
5307 static void wout_x1_P(DisasContext *s, DisasOps *o)
5308 {
5309     int f1 = get_field(s, r1);
5310     store_freg(f1, o->out);
5311     store_freg(f1 + 2, o->out2);
5312 }
5313 #define SPEC_wout_x1_P SPEC_r1_f128
5314 
5315 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5316 {
5317     if (get_field(s, r1) != get_field(s, r2)) {
5318         store_reg32_i64(get_field(s, r1), o->out);
5319     }
5320 }
5321 #define SPEC_wout_cond_r1r2_32 0
5322 
5323 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5324 {
5325     if (get_field(s, r1) != get_field(s, r2)) {
5326         store_freg32_i64(get_field(s, r1), o->out);
5327     }
5328 }
5329 #define SPEC_wout_cond_e1e2 0
5330 
5331 static void wout_m1_8(DisasContext *s, DisasOps *o)
5332 {
5333     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5334 }
5335 #define SPEC_wout_m1_8 0
5336 
5337 static void wout_m1_16(DisasContext *s, DisasOps *o)
5338 {
5339     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5340 }
5341 #define SPEC_wout_m1_16 0
5342 
5343 #ifndef CONFIG_USER_ONLY
5344 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5345 {
5346     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5347 }
5348 #define SPEC_wout_m1_16a 0
5349 #endif
5350 
5351 static void wout_m1_32(DisasContext *s, DisasOps *o)
5352 {
5353     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5354 }
5355 #define SPEC_wout_m1_32 0
5356 
5357 #ifndef CONFIG_USER_ONLY
5358 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5359 {
5360     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5361 }
5362 #define SPEC_wout_m1_32a 0
5363 #endif
5364 
5365 static void wout_m1_64(DisasContext *s, DisasOps *o)
5366 {
5367     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5368 }
5369 #define SPEC_wout_m1_64 0
5370 
5371 #ifndef CONFIG_USER_ONLY
5372 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5373 {
5374     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5375 }
5376 #define SPEC_wout_m1_64a 0
5377 #endif
5378 
5379 static void wout_m2_32(DisasContext *s, DisasOps *o)
5380 {
5381     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5382 }
5383 #define SPEC_wout_m2_32 0
5384 
5385 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5386 {
5387     store_reg(get_field(s, r1), o->in2);
5388 }
5389 #define SPEC_wout_in2_r1 0
5390 
5391 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5392 {
5393     store_reg32_i64(get_field(s, r1), o->in2);
5394 }
5395 #define SPEC_wout_in2_r1_32 0
5396 
5397 /* ====================================================================== */
5398 /* The "INput 1" generators.  These load the first operand to an insn.  */
5399 
5400 static void in1_r1(DisasContext *s, DisasOps *o)
5401 {
5402     o->in1 = load_reg(get_field(s, r1));
5403 }
5404 #define SPEC_in1_r1 0
5405 
5406 static void in1_r1_o(DisasContext *s, DisasOps *o)
5407 {
5408     o->in1 = regs[get_field(s, r1)];
5409 }
5410 #define SPEC_in1_r1_o 0
5411 
5412 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5413 {
5414     o->in1 = tcg_temp_new_i64();
5415     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5416 }
5417 #define SPEC_in1_r1_32s 0
5418 
5419 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5420 {
5421     o->in1 = tcg_temp_new_i64();
5422     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5423 }
5424 #define SPEC_in1_r1_32u 0
5425 
5426 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5427 {
5428     o->in1 = tcg_temp_new_i64();
5429     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5430 }
5431 #define SPEC_in1_r1_sr32 0
5432 
5433 static void in1_r1p1(DisasContext *s, DisasOps *o)
5434 {
5435     o->in1 = load_reg(get_field(s, r1) + 1);
5436 }
5437 #define SPEC_in1_r1p1 SPEC_r1_even
5438 
5439 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5440 {
5441     o->in1 = regs[get_field(s, r1) + 1];
5442 }
5443 #define SPEC_in1_r1p1_o SPEC_r1_even
5444 
5445 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5446 {
5447     o->in1 = tcg_temp_new_i64();
5448     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5449 }
5450 #define SPEC_in1_r1p1_32s SPEC_r1_even
5451 
5452 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5453 {
5454     o->in1 = tcg_temp_new_i64();
5455     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5456 }
5457 #define SPEC_in1_r1p1_32u SPEC_r1_even
5458 
5459 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5460 {
5461     int r1 = get_field(s, r1);
5462     o->in1 = tcg_temp_new_i64();
5463     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5464 }
5465 #define SPEC_in1_r1_D32 SPEC_r1_even
5466 
5467 static void in1_r2(DisasContext *s, DisasOps *o)
5468 {
5469     o->in1 = load_reg(get_field(s, r2));
5470 }
5471 #define SPEC_in1_r2 0
5472 
5473 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5474 {
5475     o->in1 = tcg_temp_new_i64();
5476     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5477 }
5478 #define SPEC_in1_r2_sr32 0
5479 
5480 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5481 {
5482     o->in1 = tcg_temp_new_i64();
5483     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5484 }
5485 #define SPEC_in1_r2_32u 0
5486 
5487 static void in1_r3(DisasContext *s, DisasOps *o)
5488 {
5489     o->in1 = load_reg(get_field(s, r3));
5490 }
5491 #define SPEC_in1_r3 0
5492 
5493 static void in1_r3_o(DisasContext *s, DisasOps *o)
5494 {
5495     o->in1 = regs[get_field(s, r3)];
5496 }
5497 #define SPEC_in1_r3_o 0
5498 
5499 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5500 {
5501     o->in1 = tcg_temp_new_i64();
5502     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5503 }
5504 #define SPEC_in1_r3_32s 0
5505 
5506 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5507 {
5508     o->in1 = tcg_temp_new_i64();
5509     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5510 }
5511 #define SPEC_in1_r3_32u 0
5512 
5513 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5514 {
5515     int r3 = get_field(s, r3);
5516     o->in1 = tcg_temp_new_i64();
5517     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5518 }
5519 #define SPEC_in1_r3_D32 SPEC_r3_even
5520 
5521 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5522 {
5523     o->in1 = tcg_temp_new_i64();
5524     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5525 }
5526 #define SPEC_in1_r3_sr32 0
5527 
5528 static void in1_e1(DisasContext *s, DisasOps *o)
5529 {
5530     o->in1 = load_freg32_i64(get_field(s, r1));
5531 }
5532 #define SPEC_in1_e1 0
5533 
5534 static void in1_f1(DisasContext *s, DisasOps *o)
5535 {
5536     o->in1 = load_freg(get_field(s, r1));
5537 }
5538 #define SPEC_in1_f1 0
5539 
5540 static void in1_x1(DisasContext *s, DisasOps *o)
5541 {
5542     o->in1_128 = load_freg_128(get_field(s, r1));
5543 }
5544 #define SPEC_in1_x1 SPEC_r1_f128
5545 
5546 /* Load the high double word of an extended (128-bit) format FP number */
5547 static void in1_x2h(DisasContext *s, DisasOps *o)
5548 {
5549     o->in1 = load_freg(get_field(s, r2));
5550 }
5551 #define SPEC_in1_x2h SPEC_r2_f128
5552 
5553 static void in1_f3(DisasContext *s, DisasOps *o)
5554 {
5555     o->in1 = load_freg(get_field(s, r3));
5556 }
5557 #define SPEC_in1_f3 0
5558 
5559 static void in1_la1(DisasContext *s, DisasOps *o)
5560 {
5561     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5562 }
5563 #define SPEC_in1_la1 0
5564 
5565 static void in1_la2(DisasContext *s, DisasOps *o)
5566 {
5567     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5568     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5569 }
5570 #define SPEC_in1_la2 0
5571 
5572 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5573 {
5574     in1_la1(s, o);
5575     o->in1 = tcg_temp_new_i64();
5576     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5577 }
5578 #define SPEC_in1_m1_8u 0
5579 
5580 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5581 {
5582     in1_la1(s, o);
5583     o->in1 = tcg_temp_new_i64();
5584     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5585 }
5586 #define SPEC_in1_m1_16s 0
5587 
5588 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5589 {
5590     in1_la1(s, o);
5591     o->in1 = tcg_temp_new_i64();
5592     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5593 }
5594 #define SPEC_in1_m1_16u 0
5595 
5596 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5597 {
5598     in1_la1(s, o);
5599     o->in1 = tcg_temp_new_i64();
5600     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5601 }
5602 #define SPEC_in1_m1_32s 0
5603 
5604 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5605 {
5606     in1_la1(s, o);
5607     o->in1 = tcg_temp_new_i64();
5608     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5609 }
5610 #define SPEC_in1_m1_32u 0
5611 
5612 static void in1_m1_64(DisasContext *s, DisasOps *o)
5613 {
5614     in1_la1(s, o);
5615     o->in1 = tcg_temp_new_i64();
5616     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5617 }
5618 #define SPEC_in1_m1_64 0
5619 
5620 /* ====================================================================== */
5621 /* The "INput 2" generators.  These load the second operand to an insn.  */
5622 
5623 static void in2_r1_o(DisasContext *s, DisasOps *o)
5624 {
5625     o->in2 = regs[get_field(s, r1)];
5626 }
5627 #define SPEC_in2_r1_o 0
5628 
5629 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5630 {
5631     o->in2 = tcg_temp_new_i64();
5632     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5633 }
5634 #define SPEC_in2_r1_16u 0
5635 
5636 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5637 {
5638     o->in2 = tcg_temp_new_i64();
5639     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5640 }
5641 #define SPEC_in2_r1_32u 0
5642 
5643 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5644 {
5645     int r1 = get_field(s, r1);
5646     o->in2 = tcg_temp_new_i64();
5647     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5648 }
5649 #define SPEC_in2_r1_D32 SPEC_r1_even
5650 
5651 static void in2_r2(DisasContext *s, DisasOps *o)
5652 {
5653     o->in2 = load_reg(get_field(s, r2));
5654 }
5655 #define SPEC_in2_r2 0
5656 
5657 static void in2_r2_o(DisasContext *s, DisasOps *o)
5658 {
5659     o->in2 = regs[get_field(s, r2)];
5660 }
5661 #define SPEC_in2_r2_o 0
5662 
5663 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5664 {
5665     int r2 = get_field(s, r2);
5666     if (r2 != 0) {
5667         o->in2 = load_reg(r2);
5668     }
5669 }
5670 #define SPEC_in2_r2_nz 0
5671 
5672 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5673 {
5674     o->in2 = tcg_temp_new_i64();
5675     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5676 }
5677 #define SPEC_in2_r2_8s 0
5678 
5679 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5680 {
5681     o->in2 = tcg_temp_new_i64();
5682     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5683 }
5684 #define SPEC_in2_r2_8u 0
5685 
5686 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5687 {
5688     o->in2 = tcg_temp_new_i64();
5689     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5690 }
5691 #define SPEC_in2_r2_16s 0
5692 
5693 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5694 {
5695     o->in2 = tcg_temp_new_i64();
5696     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5697 }
5698 #define SPEC_in2_r2_16u 0
5699 
5700 static void in2_r3(DisasContext *s, DisasOps *o)
5701 {
5702     o->in2 = load_reg(get_field(s, r3));
5703 }
5704 #define SPEC_in2_r3 0
5705 
5706 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5707 {
5708     int r3 = get_field(s, r3);
5709     o->in2_128 = tcg_temp_new_i128();
5710     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5711 }
5712 #define SPEC_in2_r3_D64 SPEC_r3_even
5713 
5714 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5715 {
5716     o->in2 = tcg_temp_new_i64();
5717     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5718 }
5719 #define SPEC_in2_r3_sr32 0
5720 
5721 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5722 {
5723     o->in2 = tcg_temp_new_i64();
5724     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5725 }
5726 #define SPEC_in2_r3_32u 0
5727 
5728 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5729 {
5730     o->in2 = tcg_temp_new_i64();
5731     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5732 }
5733 #define SPEC_in2_r2_32s 0
5734 
5735 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5736 {
5737     o->in2 = tcg_temp_new_i64();
5738     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5739 }
5740 #define SPEC_in2_r2_32u 0
5741 
5742 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5743 {
5744     o->in2 = tcg_temp_new_i64();
5745     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5746 }
5747 #define SPEC_in2_r2_sr32 0
5748 
5749 static void in2_e2(DisasContext *s, DisasOps *o)
5750 {
5751     o->in2 = load_freg32_i64(get_field(s, r2));
5752 }
5753 #define SPEC_in2_e2 0
5754 
5755 static void in2_f2(DisasContext *s, DisasOps *o)
5756 {
5757     o->in2 = load_freg(get_field(s, r2));
5758 }
5759 #define SPEC_in2_f2 0
5760 
5761 static void in2_x2(DisasContext *s, DisasOps *o)
5762 {
5763     o->in2_128 = load_freg_128(get_field(s, r2));
5764 }
5765 #define SPEC_in2_x2 SPEC_r2_f128
5766 
5767 /* Load the low double word of an extended (128-bit) format FP number */
5768 static void in2_x2l(DisasContext *s, DisasOps *o)
5769 {
5770     o->in2 = load_freg(get_field(s, r2) + 2);
5771 }
5772 #define SPEC_in2_x2l SPEC_r2_f128
5773 
5774 static void in2_ra2(DisasContext *s, DisasOps *o)
5775 {
5776     int r2 = get_field(s, r2);
5777 
5778     /* Note: *don't* treat !r2 as 0, use the reg value. */
5779     o->in2 = tcg_temp_new_i64();
5780     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5781 }
5782 #define SPEC_in2_ra2 0
5783 
5784 static void in2_a2(DisasContext *s, DisasOps *o)
5785 {
5786     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5787     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5788 }
5789 #define SPEC_in2_a2 0
5790 
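/*
 * For relative-immediate operands the I2 field is a signed halfword
 * count relative to the current PC, hence the "imm * 2" scaling below
 * when the target is a compile-time immediate.
 */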
5791 static TCGv gen_ri2(DisasContext *s)
5792 {
5793     TCGv ri2 = NULL;
5794     bool is_imm;
5795     int imm;
5796 
5797     disas_jdest(s, i2, is_imm, imm, ri2);
5798     if (is_imm) {
5799         ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
5800     }
5801 
5802     return ri2;
5803 }
5804 
5805 static void in2_ri2(DisasContext *s, DisasOps *o)
5806 {
5807     o->in2 = gen_ri2(s);
5808 }
5809 #define SPEC_in2_ri2 0
5810 
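/*
 * The shift amount is specified like an address (base + displacement),
 * but only the low six bits of the computed value are used, hence the
 * 0x3f masking in both paths below.
 */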
5811 static void in2_sh(DisasContext *s, DisasOps *o)
5812 {
5813     int b2 = get_field(s, b2);
5814     int d2 = get_field(s, d2);
5815 
5816     if (b2 == 0) {
5817         o->in2 = tcg_constant_i64(d2 & 0x3f);
5818     } else {
5819         o->in2 = get_address(s, 0, b2, d2);
5820         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5821     }
5822 }
5823 #define SPEC_in2_sh 0
5824 
5825 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5826 {
5827     in2_a2(s, o);
5828     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5829 }
5830 #define SPEC_in2_m2_8u 0
5831 
5832 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5833 {
5834     in2_a2(s, o);
5835     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5836 }
5837 #define SPEC_in2_m2_16s 0
5838 
5839 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5840 {
5841     in2_a2(s, o);
5842     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5843 }
5844 #define SPEC_in2_m2_16u 0
5845 
5846 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5847 {
5848     in2_a2(s, o);
5849     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5850 }
5851 #define SPEC_in2_m2_32s 0
5852 
5853 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5854 {
5855     in2_a2(s, o);
5856     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5857 }
5858 #define SPEC_in2_m2_32u 0
5859 
5860 #ifndef CONFIG_USER_ONLY
5861 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5862 {
5863     in2_a2(s, o);
5864     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5865 }
5866 #define SPEC_in2_m2_32ua 0
5867 #endif
5868 
5869 static void in2_m2_64(DisasContext *s, DisasOps *o)
5870 {
5871     in2_a2(s, o);
5872     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5873 }
5874 #define SPEC_in2_m2_64 0
5875 
5876 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5877 {
5878     in2_a2(s, o);
5879     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5880     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5881 }
5882 #define SPEC_in2_m2_64w 0
5883 
5884 #ifndef CONFIG_USER_ONLY
5885 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5886 {
5887     in2_a2(s, o);
5888     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5889 }
5890 #define SPEC_in2_m2_64a 0
5891 #endif
5892 
5893 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5894 {
5895     o->in2 = tcg_temp_new_i64();
5896     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5897 }
5898 #define SPEC_in2_mri2_16s 0
5899 
5900 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5901 {
5902     o->in2 = tcg_temp_new_i64();
5903     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5904 }
5905 #define SPEC_in2_mri2_16u 0
5906 
5907 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5908 {
5909     o->in2 = tcg_temp_new_i64();
5910     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5911                        MO_TESL | MO_ALIGN);
5912 }
5913 #define SPEC_in2_mri2_32s 0
5914 
5915 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5916 {
5917     o->in2 = tcg_temp_new_i64();
5918     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5919                        MO_TEUL | MO_ALIGN);
5920 }
5921 #define SPEC_in2_mri2_32u 0
5922 
5923 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5924 {
5925     o->in2 = tcg_temp_new_i64();
5926     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5927                         MO_TEUQ | MO_ALIGN);
5928 }
5929 #define SPEC_in2_mri2_64 0
5930 
5931 static void in2_i2(DisasContext *s, DisasOps *o)
5932 {
5933     o->in2 = tcg_constant_i64(get_field(s, i2));
5934 }
5935 #define SPEC_in2_i2 0
5936 
5937 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5938 {
5939     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5940 }
5941 #define SPEC_in2_i2_8u 0
5942 
5943 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5944 {
5945     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5946 }
5947 #define SPEC_in2_i2_16u 0
5948 
5949 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5950 {
5951     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5952 }
5953 #define SPEC_in2_i2_32u 0
5954 
5955 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5956 {
5957     uint64_t i2 = (uint16_t)get_field(s, i2);
5958     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5959 }
5960 #define SPEC_in2_i2_16u_shl 0
5961 
5962 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5963 {
5964     uint64_t i2 = (uint32_t)get_field(s, i2);
5965     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5966 }
5967 #define SPEC_in2_i2_32u_shl 0
5968 
5969 #ifndef CONFIG_USER_ONLY
5970 static void in2_insn(DisasContext *s, DisasOps *o)
5971 {
5972     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5973 }
5974 #define SPEC_in2_insn 0
5975 #endif
5976 
5977 /* ====================================================================== */
5978 
5979 /* Find opc within the table of insns.  This is formulated as a switch
5980    statement so that (1) we get compile-time notice of cut-paste errors
5981    for duplicated opcodes, and (2) the compiler generates the binary
5982    search tree, rather than us having to post-process the table.  */
5983 
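/*
 * A sketch of how the triple inclusion of insn-data.h.inc plays out.
 * Each entry is written once in the table, e.g. (illustrative values)
 *
 *   C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
 *
 * and expands three times via the redefinitions of E: first into the
 * enum member insn_AR, then into a DisasInsn initializer wiring up
 * in1_r1, in2_r2, prep_new, wout_r1_32, op_add and cout_adds32, and
 * finally into "case 0x1a00: return &insn_info[insn_AR];" inside
 * lookup_opc().
 */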
5984 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5985     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5986 
5987 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5988     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5989 
5990 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5991     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5992 
5993 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5994 
5995 enum DisasInsnEnum {
5996 #include "insn-data.h.inc"
5997 };
5998 
5999 #undef E
6000 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6001     .opc = OPC,                                                             \
6002     .flags = FL,                                                            \
6003     .fmt = FMT_##FT,                                                        \
6004     .fac = FAC_##FC,                                                        \
6005     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6006     .name = #NM,                                                            \
6007     .help_in1 = in1_##I1,                                                   \
6008     .help_in2 = in2_##I2,                                                   \
6009     .help_prep = prep_##P,                                                  \
6010     .help_wout = wout_##W,                                                  \
6011     .help_cout = cout_##CC,                                                 \
6012     .help_op = op_##OP,                                                     \
6013     .data = D                                                               \
6014  },
6015 
6016 /* Allow 0 to be used for NULL in the table below.  */
6017 #define in1_0  NULL
6018 #define in2_0  NULL
6019 #define prep_0  NULL
6020 #define wout_0  NULL
6021 #define cout_0  NULL
6022 #define op_0  NULL
6023 
6024 #define SPEC_in1_0 0
6025 #define SPEC_in2_0 0
6026 #define SPEC_prep_0 0
6027 #define SPEC_wout_0 0
6028 
6029 /* Give smaller names to the various facilities.  */
6030 #define FAC_Z           S390_FEAT_ZARCH
6031 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6032 #define FAC_DFP         S390_FEAT_DFP
6033 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6034 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6035 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6036 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6037 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6038 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6039 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6040 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6041 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6042 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6043 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6044 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6045 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6046 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6047 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6048 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6049 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6050 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6051 #define FAC_SFLE        S390_FEAT_STFLE
6052 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6053 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6054 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6055 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6056 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6057 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6058 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6059 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6060 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6061 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6062 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6063 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6064 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6065 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6066 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6067 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6068 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6069 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6070 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6071 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6072 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6073 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6074 
6075 static const DisasInsn insn_info[] = {
6076 #include "insn-data.h.inc"
6077 };
6078 
6079 #undef E
6080 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6081     case OPC: return &insn_info[insn_ ## NM];
6082 
6083 static const DisasInsn *lookup_opc(uint16_t opc)
6084 {
6085     switch (opc) {
6086 #include "insn-data.h.inc"
6087     default:
6088         return NULL;
6089     }
6090 }
6091 
6092 #undef F
6093 #undef E
6094 #undef D
6095 #undef C
6096 
6097 /* Extract a field from the insn.  The INSN should be left-aligned in
6098    the uint64_t so that we can more easily utilize the big-bit-endian
6099    definitions we extract from the Principles of Operation.  */
6100 
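/*
 * Worked example: a 4-bit field starting at big-endian bit 8 of the
 * left-aligned insn (the classic RR r1 field) is extracted below as
 * r = (insn << 8) >> 60.  For a signed 16-bit field, m = 0x8000 and
 * (r ^ m) - m sign-extends the result.
 */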
6101 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6102 {
6103     uint32_t r, m;
6104 
6105     if (f->size == 0) {
6106         return;
6107     }
6108 
6109     /* Zero extract the field from the insn.  */
6110     r = (insn << f->beg) >> (64 - f->size);
6111 
6112     /* Sign-extend, or un-swap the field as necessary.  */
6113     switch (f->type) {
6114     case 0: /* unsigned */
6115         break;
6116     case 1: /* signed */
6117         assert(f->size <= 32);
6118         m = 1u << (f->size - 1);
6119         r = (r ^ m) - m;
6120         break;
6121     case 2: /* dl+dh split, signed 20 bit. */
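        /*
         * The 20-bit displacement is encoded as DL (12 bits) followed
         * by DH (8 bits), so the extracted r is (dl << 8) | dh;
         * reassemble it as the sign-extended value (dh << 12) | dl.
         */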
6122         r = ((int8_t)r << 12) | (r >> 8);
6123         break;
6124     case 3: /* MSB stored in RXB */
6125         g_assert(f->size == 4);
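        /*
         * These are the register fields widened to five bits via the
         * RXB byte (big-endian bits 36-39 of the insn); which RXB bit
         * supplies the MSB depends on where the field begins.
         */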
6126         switch (f->beg) {
6127         case 8:
6128             r |= extract64(insn, 63 - 36, 1) << 4;
6129             break;
6130         case 12:
6131             r |= extract64(insn, 63 - 37, 1) << 4;
6132             break;
6133         case 16:
6134             r |= extract64(insn, 63 - 38, 1) << 4;
6135             break;
6136         case 32:
6137             r |= extract64(insn, 63 - 39, 1) << 4;
6138             break;
6139         default:
6140             g_assert_not_reached();
6141         }
6142         break;
6143     default:
6144         abort();
6145     }
6146 
6147     /*
6148      * Validate that the "compressed" encoding we selected above is valid.
6149      * I.e. we haven't made two different original fields overlap.
6150      */
6151     assert(((o->presentC >> f->indexC) & 1) == 0);
6152     o->presentC |= 1 << f->indexC;
6153     o->presentO |= 1 << f->indexO;
6154 
6155     o->c[f->indexC] = r;
6156 }
6157 
6158 /* Look up the insn at the current PC, extracting the operands into O and
6159    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6160 
6161 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6162 {
6163     uint64_t insn, pc = s->base.pc_next;
6164     int op, op2, ilen;
6165     const DisasInsn *info;
6166 
6167     if (unlikely(s->ex_value)) {
6168         /* Drop the EX data now, so that it's clear on exception paths.  */
6169         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6170                        offsetof(CPUS390XState, ex_value));
6171 
6172         /* Extract the values saved by EXECUTE.  */
6173         insn = s->ex_value & 0xffffffffffff0000ull;
6174         ilen = s->ex_value & 0xf;
6175 
6176         /* Register insn bytes with translator so plugins work. */
6177         for (int i = 0; i < ilen; i++) {
6178             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6179             translator_fake_ldb(byte, pc + i);
6180         }
6181         op = insn >> 56;
6182     } else {
6183         insn = ld_code2(env, s, pc);
6184         op = (insn >> 8) & 0xff;
6185         ilen = get_ilen(op);
6186         switch (ilen) {
6187         case 2:
6188             insn = insn << 48;
6189             break;
6190         case 4:
6191             insn = ld_code4(env, s, pc) << 32;
6192             break;
6193         case 6:
6194             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6195             break;
6196         default:
6197             g_assert_not_reached();
6198         }
6199     }
6200     s->pc_tmp = s->base.pc_next + ilen;
6201     s->ilen = ilen;
6202 
6203     /* We can't actually determine the insn format until we've looked up
6204        the full insn opcode, which we can't do without first locating the
6205        secondary opcode.  Assume by default that OP2 is at bit 40; for
6206        those smaller insns that don't actually have a secondary opcode
6207        this will correctly result in OP2 = 0. */
6208     switch (op) {
6209     case 0x01: /* E */
6210     case 0x80: /* S */
6211     case 0x82: /* S */
6212     case 0x93: /* S */
6213     case 0xb2: /* S, RRF, RRE, IE */
6214     case 0xb3: /* RRE, RRD, RRF */
6215     case 0xb9: /* RRE, RRF */
6216     case 0xe5: /* SSE, SIL */
6217         op2 = (insn << 8) >> 56;
6218         break;
6219     case 0xa5: /* RI */
6220     case 0xa7: /* RI */
6221     case 0xc0: /* RIL */
6222     case 0xc2: /* RIL */
6223     case 0xc4: /* RIL */
6224     case 0xc6: /* RIL */
6225     case 0xc8: /* SSF */
6226     case 0xcc: /* RIL */
6227         op2 = (insn << 12) >> 60;
6228         break;
6229     case 0xc5: /* MII */
6230     case 0xc7: /* SMI */
6231     case 0xd0 ... 0xdf: /* SS */
6232     case 0xe1: /* SS */
6233     case 0xe2: /* SS */
6234     case 0xe8: /* SS */
6235     case 0xe9: /* SS */
6236     case 0xea: /* SS */
6237     case 0xee ... 0xf3: /* SS */
6238     case 0xf8 ... 0xfd: /* SS */
6239         op2 = 0;
6240         break;
6241     default:
6242         op2 = (insn << 40) >> 56;
6243         break;
6244     }
6245 
6246     memset(&s->fields, 0, sizeof(s->fields));
6247     s->fields.raw_insn = insn;
6248     s->fields.op = op;
6249     s->fields.op2 = op2;
6250 
6251     /* Look up the instruction.  */
6252     info = lookup_opc(op << 8 | op2);
6253     s->insn = info;
6254 
6255     /* If we found it, extract the operands.  */
6256     if (info != NULL) {
6257         DisasFormat fmt = info->fmt;
6258         int i;
6259 
6260         for (i = 0; i < NUM_C_FIELD; ++i) {
6261             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6262         }
6263     }
6264     return info;
6265 }
6266 
6267 static bool is_afp_reg(int reg)
6268 {
6269     return reg % 2 || reg > 6;
6270 }
6271 
6272 static bool is_fp_pair(int reg)
6273 {
6274     /* Valid regs are 0,1,4,5,8,9,12,13: i.e. exactly those with bit 0x2 clear. */
6275     return !(reg & 0x2);
6276 }
6277 
6278 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6279 {
6280     const DisasInsn *insn;
6281     DisasJumpType ret = DISAS_NEXT;
6282     DisasOps o = {};
6283     bool icount = false;
6284 
6285     /* Search for the insn in the table.  */
6286     insn = extract_insn(env, s);
6287 
6288     /* Update insn_start now that we know the ILEN.  */
6289     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6290 
6291     /* Not found means unimplemented/illegal opcode.  */
6292     if (insn == NULL) {
6293         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6294                       s->fields.op, s->fields.op2);
6295         gen_illegal_opcode(s);
6296         ret = DISAS_NORETURN;
6297         goto out;
6298     }
6299 
6300 #ifndef CONFIG_USER_ONLY
6301     if (s->base.tb->flags & FLAG_MASK_PER) {
6302         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6303         gen_helper_per_ifetch(cpu_env, addr);
6304     }
6305 #endif
6306 
6307     /* process flags */
6308     if (insn->flags) {
6309         /* privileged instruction */
6310         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6311             gen_program_exception(s, PGM_PRIVILEGED);
6312             ret = DISAS_NORETURN;
6313             goto out;
6314         }
6315 
6316         /* If AFP is not enabled, AFP instructions and registers are forbidden.  */
6317         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6318             uint8_t dxc = 0;
6319 
6320             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6321                 dxc = 1;
6322             }
6323             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6324                 dxc = 1;
6325             }
6326             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6327                 dxc = 1;
6328             }
6329             if (insn->flags & IF_BFP) {
6330                 dxc = 2;
6331             }
6332             if (insn->flags & IF_DFP) {
6333                 dxc = 3;
6334             }
6335             if (insn->flags & IF_VEC) {
6336                 dxc = 0xfe;
6337             }
6338             if (dxc) {
6339                 gen_data_exception(dxc);
6340                 ret = DISAS_NORETURN;
6341                 goto out;
6342             }
6343         }
6344 
6345         /* If the vector facility is not enabled, vector instructions are forbidden.  */
6346         if (insn->flags & IF_VEC) {
6347             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6348                 gen_data_exception(0xfe);
6349                 ret = DISAS_NORETURN;
6350                 goto out;
6351             }
6352         }
6353 
6354         /* input/output is the special case for icount mode */
6355         if (unlikely(insn->flags & IF_IO)) {
6356             icount = translator_io_start(&s->base);
6357         }
6358     }
6359 
6360     /* Check for insn specification exceptions.  */
6361     if (insn->spec) {
6362         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6363             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6364             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6365             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6366             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6367             gen_program_exception(s, PGM_SPECIFICATION);
6368             ret = DISAS_NORETURN;
6369             goto out;
6370         }
6371     }
6372 
6373     /* Implement the instruction.  */
6374     if (insn->help_in1) {
6375         insn->help_in1(s, &o);
6376     }
6377     if (insn->help_in2) {
6378         insn->help_in2(s, &o);
6379     }
6380     if (insn->help_prep) {
6381         insn->help_prep(s, &o);
6382     }
6383     if (insn->help_op) {
6384         ret = insn->help_op(s, &o);
6385     }
6386     if (ret != DISAS_NORETURN) {
6387         if (insn->help_wout) {
6388             insn->help_wout(s, &o);
6389         }
6390         if (insn->help_cout) {
6391             insn->help_cout(s, &o);
6392         }
6393     }
6394 
6395     /* An I/O insn should be the last one in the TB when icount is enabled.  */
6396     if (unlikely(icount && ret == DISAS_NEXT)) {
6397         ret = DISAS_TOO_MANY;
6398     }
6399 
6400 #ifndef CONFIG_USER_ONLY
6401     if (s->base.tb->flags & FLAG_MASK_PER) {
6402         /* An exception might be triggered; save the PSW if not already done.  */
6403         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6404             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6405         }
6406 
6407         /* Call the helper to check for a possible PER exception.  */
6408         gen_helper_per_check_exception(cpu_env);
6409     }
6410 #endif
6411 
6412 out:
6413     /* Advance to the next instruction.  */
6414     s->base.pc_next = s->pc_tmp;
6415     return ret;
6416 }
6417 
6418 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6419 {
6420     DisasContext *dc = container_of(dcbase, DisasContext, base);
6421 
6422     /* 31-bit mode */
6423     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6424         dc->base.pc_first &= 0x7fffffff;
6425         dc->base.pc_next = dc->base.pc_first;
6426     }
6427 
6428     dc->cc_op = CC_OP_DYNAMIC;
6429     dc->ex_value = dc->base.tb->cs_base;
6430     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6431 }
6432 
6433 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6434 {
6435 }
6436 
6437 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6438 {
6439     DisasContext *dc = container_of(dcbase, DisasContext, base);
6440 
6441     /* Delay setting ilen until we've read the insn. */
6442     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6443     dc->insn_start = tcg_last_op();
6444 }
6445 
6446 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6447                                 uint64_t pc)
6448 {
6449     uint64_t insn = cpu_lduw_code(env, pc);
6450 
6451     return pc + get_ilen((insn >> 8) & 0xff);
6452 }
6453 
6454 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6455 {
6456     CPUS390XState *env = cs->env_ptr;
6457     DisasContext *dc = container_of(dcbase, DisasContext, base);
6458 
6459     dc->base.is_jmp = translate_one(env, dc);
6460     if (dc->base.is_jmp == DISAS_NEXT) {
6461         if (dc->ex_value ||
6462             !is_same_page(dcbase, dc->base.pc_next) ||
6463             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6464             dc->base.is_jmp = DISAS_TOO_MANY;
6465         }
6466     }
6467 }
6468 
6469 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6470 {
6471     DisasContext *dc = container_of(dcbase, DisasContext, base);
6472 
6473     switch (dc->base.is_jmp) {
6474     case DISAS_NORETURN:
6475         break;
6476     case DISAS_TOO_MANY:
6477         update_psw_addr(dc);
6478         /* FALLTHRU */
6479     case DISAS_PC_UPDATED:
6480         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6481            cc op type is in env */
6482         update_cc_op(dc);
6483         /* FALLTHRU */
6484     case DISAS_PC_CC_UPDATED:
6485         /* Exit the TB, either returning to the main loop or chaining to the next TB.  */
6486         if (dc->exit_to_mainloop) {
6487             tcg_gen_exit_tb(NULL, 0);
6488         } else {
6489             tcg_gen_lookup_and_goto_ptr();
6490         }
6491         break;
6492     default:
6493         g_assert_not_reached();
6494     }
6495 }
6496 
6497 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6498                                CPUState *cs, FILE *logfile)
6499 {
6500     DisasContext *dc = container_of(dcbase, DisasContext, base);
6501 
6502     if (unlikely(dc->ex_value)) {
6503         /* ??? Unfortunately target_disas can't use host memory.  */
6504         fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
6505     } else {
6506         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6507         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6508     }
6509 }
6510 
6511 static const TranslatorOps s390x_tr_ops = {
6512     .init_disas_context = s390x_tr_init_disas_context,
6513     .tb_start           = s390x_tr_tb_start,
6514     .insn_start         = s390x_tr_insn_start,
6515     .translate_insn     = s390x_tr_translate_insn,
6516     .tb_stop            = s390x_tr_tb_stop,
6517     .disas_log          = s390x_tr_disas_log,
6518 };
6519 
6520 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6521                            target_ulong pc, void *host_pc)
6522 {
6523     DisasContext dc;
6524 
6525     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6526 }
6527 
6528 void s390x_restore_state_to_opc(CPUState *cs,
6529                                 const TranslationBlock *tb,
6530                                 const uint64_t *data)
6531 {
6532     S390CPU *cpu = S390_CPU(cs);
6533     CPUS390XState *env = &cpu->env;
6534     int cc_op = data[1];
6535 
6536     env->psw.addr = data[0];
6537 
6538     /* Update the CC op if it is not already up-to-date in env.  */
6539     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6540         env->cc_op = cc_op;
6541     }
6542 
6543     /* Record ILEN.  */
6544     env->int_pgm_ilen = data[2];
6545 }
6546