/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (almost) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
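
/*
 * Illustrative example (not from the original source): an RR-format
 * instruction decodes r1 into c[0] (FLD_C_r1) and r2 into c[1]
 * (FLD_C_r2).  An RX-format instruction reuses those compact slots for
 * b2 (also index 1) and x2 (index 6), which is safe because r2 and
 * b2/x2 never occur in the same format.  The distinct "O" indices
 * recorded in presentO still identify exactly which fields were decoded.
 */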

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
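
/*
 * Illustrative note (not from the original source): in 64-bit mode the
 * link info is the full address.  In 31-bit mode the high bit of the
 * 32-bit address (0x80000000) is set to record the addressing mode and
 * only the low 32 bits of OUT are replaced; in 24-bit mode the address
 * is deposited as-is into the low 32 bits, again preserving the high
 * half of OUT.
 */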

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the first 8 bytes and vregs[n][1] the last 8 bytes
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
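
/*
 * Worked example (illustrative, not from the original source): for
 * es = MO_32 and enr = 1, bytes = 4 and offs = 4.  On a little-endian
 * host, offs ^= (8 - 4) yields 0, matching the "W: [1][0] - [3][2]"
 * row above: element 1 occupies the first four host bytes.  On a
 * big-endian host offs stays 4, matching "W: [0][1] - [2][3]".
 */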

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}
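
/*
 * Worked example (illustrative, not from the original source): in
 * 24-bit mode with b2 = x2 = 0 and d2 = -4, the final mask with
 * 0x00ffffff wraps the address to 0x00fffffc.  Applying the mask once
 * at the end, rather than cropping the negative displacement early,
 * keeps the intermediate addend a simple signed immediate.
 */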

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
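
/*
 * Illustrative summary (not from the original source): the condition
 * code is evaluated lazily.  An arithmetic op merely records its
 * operands in cc_src/cc_dst/cc_vr and its kind in s->cc_op; only when
 * a consumer (e.g. a conditional branch) actually needs the 2-bit CC
 * does gen_op_calc_cc() invoke the calc_cc helper with one, two or
 * three of those saved operands, after which CC_OP_STATIC marks the
 * value as materialized in cc_op.
 */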

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
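
/*
 * Worked example (illustrative, not from the original source): the
 * branch mask bits select CC values 0..3 from most to least
 * significant, and after a signed compare CC=0 means equal, CC=1 low
 * and CC=2 high.  A mask of 8|4 = 12 therefore accepts CC 0 or 1,
 * i.e. a <= b, and indeed ltgt_cond[12] is TCG_COND_LE.  The low mask
 * bit (CC=3) is a don't-care here, hence the paired entries.
 */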

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
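
/*
 * Illustrative note (not from the original source): for CC_OP_STATIC
 * the carry is the most significant of the two CC bits, so cc >> 1
 * yields 1 for CC values 2 and 3 and 0 for CC values 0 and 1.  The +1
 * in the CC_OP_SUBU case assumes the borrow is recorded in cc_src as
 * 0 or -1, converting it into the 1/0 carry convention of CC_OP_ADDU
 * (1 = no borrow, 0 = borrow).
 */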

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
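
/*
 * Illustrative layout sketch (not from the original source): in 24-bit
 * mode the BAL-style link word assembled above packs, counting from
 * the least significant bit of the low 32 bits, the instruction-length
 * code (s->ilen / 2) into bits 30-31, the condition code into bits
 * 28-29, the program mask (taken from psw_mask) into bits 24-27, and
 * the return address s->pc_tmp into bits 0-23.
 */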

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)
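
/*
 * Illustrative note (not from the original source): when a relative
 * branch is reached via EXECUTE (s->ex_value is set), its target must
 * be computed from the address of the EXECUTEd instruction, so the
 * macro loads ex_target and adds RI * 2 at run time instead of
 * treating the target as a translation-time immediate.
 */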
1560 
1561 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1562 {
1563     DisasCompare c;
1564     bool is_imm;
1565     int imm;
1566 
1567     pc_to_link_info(o->out, s, s->pc_tmp);
1568 
1569     disas_jdest(s, i2, is_imm, imm, o->in2);
1570     disas_jcc(s, &c, 0xf);
1571     return help_branch(s, &c, is_imm, imm, o->in2);
1572 }
1573 
1574 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1575 {
1576     int m1 = get_field(s, m1);
1577     DisasCompare c;
1578     bool is_imm;
1579     int imm;
1580 
1581     /* BCR with R2 = 0 causes no branching */
1582     if (have_field(s, r2) && get_field(s, r2) == 0) {
1583         if (m1 == 14) {
1584             /* Perform serialization */
1585             /* FIXME: check for fast-BCR-serialization facility */
1586             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1587         }
1588         if (m1 == 15) {
1589             /* Perform serialization */
1590             /* FIXME: perform checkpoint-synchronisation */
1591             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1592         }
1593         return DISAS_NEXT;
1594     }
1595 
1596     disas_jdest(s, i2, is_imm, imm, o->in2);
1597     disas_jcc(s, &c, m1);
1598     return help_branch(s, &c, is_imm, imm, o->in2);
1599 }
1600 
1601 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1602 {
1603     int r1 = get_field(s, r1);
1604     DisasCompare c;
1605     bool is_imm;
1606     TCGv_i64 t;
1607     int imm;
1608 
1609     c.cond = TCG_COND_NE;
1610     c.is_64 = false;
1611 
1612     t = tcg_temp_new_i64();
1613     tcg_gen_subi_i64(t, regs[r1], 1);
1614     store_reg32_i64(r1, t);
1615     c.u.s32.a = tcg_temp_new_i32();
1616     c.u.s32.b = tcg_constant_i32(0);
1617     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1618 
1619     disas_jdest(s, i2, is_imm, imm, o->in2);
1620     return help_branch(s, &c, is_imm, imm, o->in2);
1621 }
1622 
1623 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1624 {
1625     int r1 = get_field(s, r1);
1626     int imm = get_field(s, i2);
1627     DisasCompare c;
1628     TCGv_i64 t;
1629 
1630     c.cond = TCG_COND_NE;
1631     c.is_64 = false;
1632 
1633     t = tcg_temp_new_i64();
1634     tcg_gen_shri_i64(t, regs[r1], 32);
1635     tcg_gen_subi_i64(t, t, 1);
1636     store_reg32h_i64(r1, t);
1637     c.u.s32.a = tcg_temp_new_i32();
1638     c.u.s32.b = tcg_constant_i32(0);
1639     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1640 
1641     return help_branch(s, &c, 1, imm, o->in2);
1642 }
1643 
1644 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1645 {
1646     int r1 = get_field(s, r1);
1647     DisasCompare c;
1648     bool is_imm;
1649     int imm;
1650 
1651     c.cond = TCG_COND_NE;
1652     c.is_64 = true;
1653 
1654     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1655     c.u.s64.a = regs[r1];
1656     c.u.s64.b = tcg_constant_i64(0);
1657 
1658     disas_jdest(s, i2, is_imm, imm, o->in2);
1659     return help_branch(s, &c, is_imm, imm, o->in2);
1660 }
1661 
1662 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1663 {
1664     int r1 = get_field(s, r1);
1665     int r3 = get_field(s, r3);
1666     DisasCompare c;
1667     bool is_imm;
1668     TCGv_i64 t;
1669     int imm;
1670 
1671     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1672     c.is_64 = false;
1673 
1674     t = tcg_temp_new_i64();
1675     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1676     c.u.s32.a = tcg_temp_new_i32();
1677     c.u.s32.b = tcg_temp_new_i32();
1678     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1679     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1680     store_reg32_i64(r1, t);
1681 
1682     disas_jdest(s, i2, is_imm, imm, o->in2);
1683     return help_branch(s, &c, is_imm, imm, o->in2);
1684 }
1685 
1686 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1687 {
1688     int r1 = get_field(s, r1);
1689     int r3 = get_field(s, r3);
1690     DisasCompare c;
1691     bool is_imm;
1692     int imm;
1693 
1694     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1695     c.is_64 = true;
1696 
1697     if (r1 == (r3 | 1)) {
1698         c.u.s64.b = load_reg(r3 | 1);
1699     } else {
1700         c.u.s64.b = regs[r3 | 1];
1701     }
1702 
1703     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1704     c.u.s64.a = regs[r1];
1705 
1706     disas_jdest(s, i2, is_imm, imm, o->in2);
1707     return help_branch(s, &c, is_imm, imm, o->in2);
1708 }
1709 
1710 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1711 {
1712     int imm, m3 = get_field(s, m3);
1713     bool is_imm;
1714     DisasCompare c;
1715 
1716     c.cond = ltgt_cond[m3];
1717     if (s->insn->data) {
1718         c.cond = tcg_unsigned_cond(c.cond);
1719     }
1720     c.is_64 = true;
1721     c.u.s64.a = o->in1;
1722     c.u.s64.b = o->in2;
1723 
1724     o->out = NULL;
1725     disas_jdest(s, i4, is_imm, imm, o->out);
1726     if (!is_imm && !o->out) {
1727         imm = 0;
1728         o->out = get_address(s, 0, get_field(s, b4),
1729                              get_field(s, d4));
1730     }
1731 
1732     return help_branch(s, &c, is_imm, imm, o->out);
1733 }
1734 
1735 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1736 {
1737     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1738     set_cc_static(s);
1739     return DISAS_NEXT;
1740 }
1741 
1742 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1743 {
1744     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1745     set_cc_static(s);
1746     return DISAS_NEXT;
1747 }
1748 
1749 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1750 {
1751     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1752     set_cc_static(s);
1753     return DISAS_NEXT;
1754 }
1755 
1756 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1757                                    bool m4_with_fpe)
1758 {
1759     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1760     uint8_t m3 = get_field(s, m3);
1761     uint8_t m4 = get_field(s, m4);
1762 
1763     /* m3 field was introduced with FPE */
1764     if (!fpe && m3_with_fpe) {
1765         m3 = 0;
1766     }
1767     /* m4 field was introduced with FPE */
1768     if (!fpe && m4_with_fpe) {
1769         m4 = 0;
1770     }
1771 
1772     /* Check for valid rounding modes.  Mode 3 became valid with the FPE facility.  */
1773     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1774         gen_program_exception(s, PGM_SPECIFICATION);
1775         return NULL;
1776     }
1777 
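         /* Pack both masks into a single constant: m3 in bits 0-3 and m4
            in bits 4-7, for the helpers to unpack.  */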
1778     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1779 }
1780 
1781 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1782 {
1783     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1784 
1785     if (!m34) {
1786         return DISAS_NORETURN;
1787     }
1788     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1789     set_cc_static(s);
1790     return DISAS_NEXT;
1791 }
1792 
1793 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1794 {
1795     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1796 
1797     if (!m34) {
1798         return DISAS_NORETURN;
1799     }
1800     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1801     set_cc_static(s);
1802     return DISAS_NEXT;
1803 }
1804 
1805 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1806 {
1807     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1808 
1809     if (!m34) {
1810         return DISAS_NORETURN;
1811     }
1812     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1813     set_cc_static(s);
1814     return DISAS_NEXT;
1815 }
1816 
1817 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1818 {
1819     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1820 
1821     if (!m34) {
1822         return DISAS_NORETURN;
1823     }
1824     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1825     set_cc_static(s);
1826     return DISAS_NEXT;
1827 }
1828 
1829 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1830 {
1831     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1832 
1833     if (!m34) {
1834         return DISAS_NORETURN;
1835     }
1836     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1837     set_cc_static(s);
1838     return DISAS_NEXT;
1839 }
1840 
1841 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1842 {
1843     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1844 
1845     if (!m34) {
1846         return DISAS_NORETURN;
1847     }
1848     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1849     set_cc_static(s);
1850     return DISAS_NEXT;
1851 }
1852 
1853 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1854 {
1855     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1856 
1857     if (!m34) {
1858         return DISAS_NORETURN;
1859     }
1860     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1861     set_cc_static(s);
1862     return DISAS_NEXT;
1863 }
1864 
1865 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1866 {
1867     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1868 
1869     if (!m34) {
1870         return DISAS_NORETURN;
1871     }
1872     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1873     set_cc_static(s);
1874     return DISAS_NEXT;
1875 }
1876 
1877 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1878 {
1879     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1880 
1881     if (!m34) {
1882         return DISAS_NORETURN;
1883     }
1884     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1885     set_cc_static(s);
1886     return DISAS_NEXT;
1887 }
1888 
1889 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1890 {
1891     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1892 
1893     if (!m34) {
1894         return DISAS_NORETURN;
1895     }
1896     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1897     set_cc_static(s);
1898     return DISAS_NEXT;
1899 }
1900 
1901 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1902 {
1903     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1904 
1905     if (!m34) {
1906         return DISAS_NORETURN;
1907     }
1908     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1909     set_cc_static(s);
1910     return DISAS_NEXT;
1911 }
1912 
1913 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1914 {
1915     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1916 
1917     if (!m34) {
1918         return DISAS_NORETURN;
1919     }
1920     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1921     set_cc_static(s);
1922     return DISAS_NEXT;
1923 }
1924 
1925 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1926 {
1927     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1928 
1929     if (!m34) {
1930         return DISAS_NORETURN;
1931     }
1932     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1933     return DISAS_NEXT;
1934 }
1935 
1936 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1937 {
1938     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1939 
1940     if (!m34) {
1941         return DISAS_NORETURN;
1942     }
1943     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1944     return DISAS_NEXT;
1945 }
1946 
1947 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1948 {
1949     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1950 
1951     if (!m34) {
1952         return DISAS_NORETURN;
1953     }
1954     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1955     return DISAS_NEXT;
1956 }
1957 
1958 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1959 {
1960     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1961 
1962     if (!m34) {
1963         return DISAS_NORETURN;
1964     }
1965     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1966     return DISAS_NEXT;
1967 }
1968 
1969 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1970 {
1971     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1972 
1973     if (!m34) {
1974         return DISAS_NORETURN;
1975     }
1976     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1977     return DISAS_NEXT;
1978 }
1979 
1980 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1981 {
1982     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1983 
1984     if (!m34) {
1985         return DISAS_NORETURN;
1986     }
1987     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1988     return DISAS_NEXT;
1989 }
1990 
1991 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1992 {
1993     int r2 = get_field(s, r2);
1994     TCGv_i128 pair = tcg_temp_new_i128();
1995     TCGv_i64 len = tcg_temp_new_i64();
1996 
1997     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1998     set_cc_static(s);
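         /* The helper packs the checksum (low half) and the number of
            bytes processed (high half) into one 128-bit value; use the
            length to advance the address and shrink the remaining count.  */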
1999     tcg_gen_extr_i128_i64(o->out, len, pair);
2000 
2001     tcg_gen_add_i64(regs[r2], regs[r2], len);
2002     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2003 
2004     return DISAS_NEXT;
2005 }
2006 
2007 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2008 {
2009     int l = get_field(s, l1);
2010     TCGv_i32 vl;
2011     MemOp mop;
2012 
2013     switch (l + 1) {
2014     case 1:
2015     case 2:
2016     case 4:
2017     case 8:
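             /* Power-of-two lengths are compared inline; ctz32 maps the
                operand length 1/2/4/8 to MO_8/MO_16/MO_32/MO_64.  */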
2018         mop = ctz32(l + 1) | MO_TE;
2019         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2020         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2021         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2022         return DISAS_NEXT;
2023     default:
2024         vl = tcg_constant_i32(l);
2025         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2026         set_cc_static(s);
2027         return DISAS_NEXT;
2028     }
2029 }
2030 
2031 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2032 {
2033     int r1 = get_field(s, r1);
2034     int r2 = get_field(s, r2);
2035     TCGv_i32 t1, t2;
2036 
2037     /* r1 and r2 must be even.  */
2038     if (r1 & 1 || r2 & 1) {
2039         gen_program_exception(s, PGM_SPECIFICATION);
2040         return DISAS_NORETURN;
2041     }
2042 
2043     t1 = tcg_constant_i32(r1);
2044     t2 = tcg_constant_i32(r2);
2045     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2046     set_cc_static(s);
2047     return DISAS_NEXT;
2048 }
2049 
2050 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2051 {
2052     int r1 = get_field(s, r1);
2053     int r3 = get_field(s, r3);
2054     TCGv_i32 t1, t3;
2055 
2056     /* r1 and r3 must be even.  */
2057     if (r1 & 1 || r3 & 1) {
2058         gen_program_exception(s, PGM_SPECIFICATION);
2059         return DISAS_NORETURN;
2060     }
2061 
2062     t1 = tcg_constant_i32(r1);
2063     t3 = tcg_constant_i32(r3);
2064     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2065     set_cc_static(s);
2066     return DISAS_NEXT;
2067 }
2068 
2069 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2070 {
2071     int r1 = get_field(s, r1);
2072     int r3 = get_field(s, r3);
2073     TCGv_i32 t1, t3;
2074 
2075     /* r1 and r3 must be even.  */
2076     if (r1 & 1 || r3 & 1) {
2077         gen_program_exception(s, PGM_SPECIFICATION);
2078         return DISAS_NORETURN;
2079     }
2080 
2081     t1 = tcg_constant_i32(r1);
2082     t3 = tcg_constant_i32(r3);
2083     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2084     set_cc_static(s);
2085     return DISAS_NEXT;
2086 }
2087 
2088 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2089 {
2090     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2091     TCGv_i32 t1 = tcg_temp_new_i32();
2092 
2093     tcg_gen_extrl_i64_i32(t1, o->in1);
2094     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2095     set_cc_static(s);
2096     return DISAS_NEXT;
2097 }
2098 
2099 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2100 {
2101     TCGv_i128 pair = tcg_temp_new_i128();
2102 
2103     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
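         /* CLST advances both string addresses; unpack the updated values
            from the returned 128-bit pair.  */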
2104     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2105 
2106     set_cc_static(s);
2107     return DISAS_NEXT;
2108 }
2109 
2110 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2111 {
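         /* COPY SIGN: combine the sign bit of in1 with the magnitude of
            in2.  */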
2112     TCGv_i64 t = tcg_temp_new_i64();
2113     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2114     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2115     tcg_gen_or_i64(o->out, o->out, t);
2116     return DISAS_NEXT;
2117 }
2118 
2119 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2120 {
2121     int d2 = get_field(s, d2);
2122     int b2 = get_field(s, b2);
2123     TCGv_i64 addr, cc;
2124 
2125     /* Note that in1 = R3 (new value) and
2126        in2 = (zero-extended) R1 (expected value).  */
2127 
2128     addr = get_address(s, 0, b2, d2);
2129     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2130                                get_mem_index(s), s->insn->data | MO_ALIGN);
2131 
2132     /* Are the memory and expected values (un)equal?  Note that this setcond
2133        produces the output CC value, thus the NE sense of the test.  */
2134     cc = tcg_temp_new_i64();
2135     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2136     tcg_gen_extrl_i64_i32(cc_op, cc);
2137     set_cc_static(s);
2138 
2139     return DISAS_NEXT;
2140 }
2141 
2142 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2143 {
2144     int r1 = get_field(s, r1);
2145 
2146     o->out_128 = tcg_temp_new_i128();
2147     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2148 
2149     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2150     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2151                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2152 
2153     /*
2154      * Extract result into cc_dst:cc_src, compare vs the expected value
2155      * in the as yet unmodified input registers, then update CC_OP.
2156      */
2157     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2158     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2159     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2160     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2161     set_cc_nz_u64(s, cc_dst);
2162 
2163     return DISAS_NEXT;
2164 }
2165 
2166 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2167 {
2168     int r3 = get_field(s, r3);
2169     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2170 
2171     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2172         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2173     } else {
2174         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2175     }
2176 
2177     set_cc_static(s);
2178     return DISAS_NEXT;
2179 }
2180 
2181 #ifndef CONFIG_USER_ONLY
2182 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2183 {
2184     MemOp mop = s->insn->data;
2185     TCGv_i64 addr, old, cc;
2186     TCGLabel *lab = gen_new_label();
2187 
2188     /* Note that in1 = R1 (zero-extended expected value),
2189        out = R1 (original reg), out2 = R1+1 (new value).  */
2190 
2191     addr = tcg_temp_new_i64();
2192     old = tcg_temp_new_i64();
2193     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2194     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2195                                get_mem_index(s), mop | MO_ALIGN);
2196 
2197     /* Are the memory and expected values (un)equal?  */
2198     cc = tcg_temp_new_i64();
2199     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2200     tcg_gen_extrl_i64_i32(cc_op, cc);
2201 
2202     /* Write back the output now, so that it happens before the
2203        following branch and we don't need local temps.  */
2204     if ((mop & MO_SIZE) == MO_32) {
2205         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2206     } else {
2207         tcg_gen_mov_i64(o->out, old);
2208     }
2209 
2210     /* If the comparison was equal, and the LSB of R2 was set,
2211        then we need to flush the TLB (for all cpus).  */
2212     tcg_gen_xori_i64(cc, cc, 1);
2213     tcg_gen_and_i64(cc, cc, o->in2);
2214     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2215 
2216     gen_helper_purge(cpu_env);
2217     gen_set_label(lab);
2218 
2219     return DISAS_NEXT;
2220 }
2221 #endif
2222 
2223 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2224 {
2225     TCGv_i64 t1 = tcg_temp_new_i64();
2226     TCGv_i32 t2 = tcg_temp_new_i32();
2227     tcg_gen_extrl_i64_i32(t2, o->in1);
2228     gen_helper_cvd(t1, t2);
2229     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2230     return DISAS_NEXT;
2231 }
2232 
2233 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2234 {
2235     int m3 = get_field(s, m3);
2236     TCGLabel *lab = gen_new_label();
2237     TCGCond c;
2238 
2239     c = tcg_invert_cond(ltgt_cond[m3]);
2240     if (s->insn->data) {
2241         c = tcg_unsigned_cond(c);
2242     }
2243     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2244 
2245     /* Trap.  */
2246     gen_trap(s);
2247 
2248     gen_set_label(lab);
2249     return DISAS_NEXT;
2250 }
2251 
2252 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2253 {
2254     int m3 = get_field(s, m3);
2255     int r1 = get_field(s, r1);
2256     int r2 = get_field(s, r2);
2257     TCGv_i32 tr1, tr2, chk;
2258 
2259     /* R1 and R2 must both be even.  */
2260     if ((r1 | r2) & 1) {
2261         gen_program_exception(s, PGM_SPECIFICATION);
2262         return DISAS_NORETURN;
2263     }
2264     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2265         m3 = 0;
2266     }
2267 
2268     tr1 = tcg_constant_i32(r1);
2269     tr2 = tcg_constant_i32(r2);
2270     chk = tcg_constant_i32(m3);
2271 
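         /* insn->data names the conversion by its UTF digits, e.g. 12 is
            CU12 (UTF-8 to UTF-16) and 42 is CU42 (UTF-32 to UTF-16).  */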
2272     switch (s->insn->data) {
2273     case 12:
2274         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2275         break;
2276     case 14:
2277         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2278         break;
2279     case 21:
2280         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2281         break;
2282     case 24:
2283         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2284         break;
2285     case 41:
2286         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2287         break;
2288     case 42:
2289         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2290         break;
2291     default:
2292         g_assert_not_reached();
2293     }
2294 
2295     set_cc_static(s);
2296     return DISAS_NEXT;
2297 }
2298 
2299 #ifndef CONFIG_USER_ONLY
2300 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2301 {
2302     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2303     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2304     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2305 
2306     gen_helper_diag(cpu_env, r1, r3, func_code);
2307     return DISAS_NEXT;
2308 }
2309 #endif
2310 
2311 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2312 {
2313     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2314     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2315     return DISAS_NEXT;
2316 }
2317 
2318 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2319 {
2320     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2321     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2322     return DISAS_NEXT;
2323 }
2324 
2325 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2326 {
2327     TCGv_i128 t = tcg_temp_new_i128();
2328 
2329     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2330     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2331     return DISAS_NEXT;
2332 }
2333 
2334 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2335 {
2336     TCGv_i128 t = tcg_temp_new_i128();
2337 
2338     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2339     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2340     return DISAS_NEXT;
2341 }
2342 
2343 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2344 {
2345     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2346     return DISAS_NEXT;
2347 }
2348 
2349 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2350 {
2351     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2352     return DISAS_NEXT;
2353 }
2354 
2355 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2356 {
2357     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2358     return DISAS_NEXT;
2359 }
2360 
2361 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2362 {
2363     int r2 = get_field(s, r2);
2364     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2365     return DISAS_NEXT;
2366 }
2367 
2368 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2369 {
2370     /* No cache information provided.  */
2371     tcg_gen_movi_i64(o->out, -1);
2372     return DISAS_NEXT;
2373 }
2374 
2375 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2376 {
2377     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2378     return DISAS_NEXT;
2379 }
2380 
2381 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2382 {
2383     int r1 = get_field(s, r1);
2384     int r2 = get_field(s, r2);
2385     TCGv_i64 t = tcg_temp_new_i64();
2386     TCGv_i64 t_cc = tcg_temp_new_i64();
2387 
2388     /* Note the "subsequently" in the PoO, which implies a defined result
2389        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2390     gen_op_calc_cc(s);
2391     tcg_gen_extu_i32_i64(t_cc, cc_op);
2392     tcg_gen_shri_i64(t, psw_mask, 32);
2393     tcg_gen_deposit_i64(t, t, t_cc, 12, 2);
2394     store_reg32_i64(r1, t);
2395     if (r2 != 0) {
2396         store_reg32_i64(r2, psw_mask);
2397     }
2398     return DISAS_NEXT;
2399 }
2400 
2401 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2402 {
2403     int r1 = get_field(s, r1);
2404     TCGv_i32 ilen;
2405     TCGv_i64 v1;
2406 
2407     /* Nested EXECUTE is not allowed.  */
2408     if (unlikely(s->ex_value)) {
2409         gen_program_exception(s, PGM_EXECUTE);
2410         return DISAS_NORETURN;
2411     }
2412 
2413     update_psw_addr(s);
2414     update_cc_op(s);
2415 
2416     if (r1 == 0) {
2417         v1 = tcg_constant_i64(0);
2418     } else {
2419         v1 = regs[r1];
2420     }
2421 
2422     ilen = tcg_constant_i32(s->ilen);
2423     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2424 
2425     return DISAS_PC_CC_UPDATED;
2426 }
2427 
2428 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2429 {
2430     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2431 
2432     if (!m34) {
2433         return DISAS_NORETURN;
2434     }
2435     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2436     return DISAS_NEXT;
2437 }
2438 
2439 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2440 {
2441     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2442 
2443     if (!m34) {
2444         return DISAS_NORETURN;
2445     }
2446     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2447     return DISAS_NEXT;
2448 }
2449 
2450 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2451 {
2452     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2453 
2454     if (!m34) {
2455         return DISAS_NORETURN;
2456     }
2457     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2458     return DISAS_NEXT;
2459 }
2460 
2461 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2462 {
2463     /* We'll use the original input for cc computation, since we get to
2464        compare that against 0, which ought to be better than comparing
2465        the real output against 64.  It also lets cc_dst be a convenient
2466        temporary during our computation.  */
2467     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2468 
2469     /* R1 = IN ? CLZ(IN) : 64.  */
2470     tcg_gen_clzi_i64(o->out, o->in2, 64);
2471 
2472     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2473        value by 64, which is undefined.  But since the shift is 64 iff the
2474        input is zero, we still get the correct result after and'ing.  */
2475     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2476     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2477     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2478     return DISAS_NEXT;
2479 }
2480 
2481 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2482 {
2483     int m3 = get_field(s, m3);
2484     int pos, len, base = s->insn->data;
2485     TCGv_i64 tmp = tcg_temp_new_i64();
2486     uint64_t ccm;
2487 
2488     switch (m3) {
2489     case 0xf:
2490         /* Effectively a 32-bit load.  */
2491         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2492         len = 32;
2493         goto one_insert;
2494 
2495     case 0xc:
2496     case 0x6:
2497     case 0x3:
2498         /* Effectively a 16-bit load.  */
2499         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2500         len = 16;
2501         goto one_insert;
2502 
2503     case 0x8:
2504     case 0x4:
2505     case 0x2:
2506     case 0x1:
2507         /* Effectively an 8-bit load.  */
2508         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2509         len = 8;
2510         goto one_insert;
2511 
2512     one_insert:
2513         pos = base + ctz32(m3) * 8;
2514         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2515         ccm = ((1ull << len) - 1) << pos;
2516         break;
2517 
2518     default:
2519         /* This is going to be a sequence of loads and inserts.  */
2520         pos = base + 32 - 8;
2521         ccm = 0;
2522         while (m3) {
2523             if (m3 & 0x8) {
2524                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2525                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2526                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2527                 ccm |= 0xffull << pos;
2528             }
2529             m3 = (m3 << 1) & 0xf;
2530             pos -= 8;
2531         }
2532         break;
2533     }
2534 
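         /* ccm marks exactly the bits that were inserted; the CC is then
            derived from the leftmost inserted bit and from whether any of
            the inserted bits are nonzero.  */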
2535     tcg_gen_movi_i64(tmp, ccm);
2536     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2537     return DISAS_NEXT;
2538 }
2539 
2540 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2541 {
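         /* insn->data encodes the insertion point: low byte = bit offset,
            high byte = field width in bits.  */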
2542     int shift = s->insn->data & 0xff;
2543     int size = s->insn->data >> 8;
2544     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2545     return DISAS_NEXT;
2546 }
2547 
2548 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2549 {
2550     TCGv_i64 t1, t2;
2551 
2552     gen_op_calc_cc(s);
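         /* Assemble the 2-bit CC above the 4-bit program mask (PSW bits
            20-23) and deposit the byte into bits 32-39 of R1, leaving the
            other bits intact.  */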
2553     t1 = tcg_temp_new_i64();
2554     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2555     t2 = tcg_temp_new_i64();
2556     tcg_gen_extu_i32_i64(t2, cc_op);
2557     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2558     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2559     return DISAS_NEXT;
2560 }
2561 
2562 #ifndef CONFIG_USER_ONLY
2563 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2564 {
2565     TCGv_i32 m4;
2566 
2567     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2568         m4 = tcg_constant_i32(get_field(s, m4));
2569     } else {
2570         m4 = tcg_constant_i32(0);
2571     }
2572     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2573     return DISAS_NEXT;
2574 }
2575 
2576 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2577 {
2578     TCGv_i32 m4;
2579 
2580     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2581         m4 = tcg_constant_i32(get_field(s, m4));
2582     } else {
2583         m4 = tcg_constant_i32(0);
2584     }
2585     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2586     return DISAS_NEXT;
2587 }
2588 
2589 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2590 {
2591     gen_helper_iske(o->out, cpu_env, o->in2);
2592     return DISAS_NEXT;
2593 }
2594 #endif
2595 
2596 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2597 {
2598     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2599     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2600     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2601     TCGv_i32 t_r1, t_r2, t_r3, type;
2602 
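         /* Validate the register operands required by each function type;
            the cases deliberately fall through so that stricter types
            inherit all of the checks below them.  */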
2603     switch (s->insn->data) {
2604     case S390_FEAT_TYPE_KMA:
2605         if (r3 == r1 || r3 == r2) {
2606             gen_program_exception(s, PGM_SPECIFICATION);
2607             return DISAS_NORETURN;
2608         }
2609         /* FALL THROUGH */
2610     case S390_FEAT_TYPE_KMCTR:
2611         if (r3 & 1 || !r3) {
2612             gen_program_exception(s, PGM_SPECIFICATION);
2613             return DISAS_NORETURN;
2614         }
2615         /* FALL THROUGH */
2616     case S390_FEAT_TYPE_PPNO:
2617     case S390_FEAT_TYPE_KMF:
2618     case S390_FEAT_TYPE_KMC:
2619     case S390_FEAT_TYPE_KMO:
2620     case S390_FEAT_TYPE_KM:
2621         if (r1 & 1 || !r1) {
2622             gen_program_exception(s, PGM_SPECIFICATION);
2623             return DISAS_NORETURN;
2624         }
2625         /* FALL THROUGH */
2626     case S390_FEAT_TYPE_KMAC:
2627     case S390_FEAT_TYPE_KIMD:
2628     case S390_FEAT_TYPE_KLMD:
2629         if (r2 & 1 || !r2) {
2630             gen_program_exception(s, PGM_SPECIFICATION);
2631             return DISAS_NORETURN;
2632         }
2633         /* FALL THROUGH */
2634     case S390_FEAT_TYPE_PCKMO:
2635     case S390_FEAT_TYPE_PCC:
2636         break;
2637     default:
2638         g_assert_not_reached();
2639     }
2640 
2641     t_r1 = tcg_constant_i32(r1);
2642     t_r2 = tcg_constant_i32(r2);
2643     t_r3 = tcg_constant_i32(r3);
2644     type = tcg_constant_i32(s->insn->data);
2645     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2646     set_cc_static(s);
2647     return DISAS_NEXT;
2648 }
2649 
2650 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2651 {
2652     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2653     set_cc_static(s);
2654     return DISAS_NEXT;
2655 }
2656 
2657 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2658 {
2659     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2660     set_cc_static(s);
2661     return DISAS_NEXT;
2662 }
2663 
2664 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2665 {
2666     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2667     set_cc_static(s);
2668     return DISAS_NEXT;
2669 }
2670 
2671 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2672 {
2673     /* The architectural output is the original value in memory, which
2674        the fetch-and-add leaves in o->in2.  */
2675     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2676                                  s->insn->data | MO_ALIGN);
2677     /* Redo the addition here to compute the CC.  */
2678     tcg_gen_add_i64(o->out, o->in1, o->in2);
2679     return DISAS_NEXT;
2680 }
2681 
2682 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2683 {
2684     /* The architectural output is the original value in memory, which
2685        the fetch-and-and leaves in o->in2.  */
2686     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2687                                  s->insn->data | MO_ALIGN);
2688     /* Redo the AND here to compute the CC.  */
2689     tcg_gen_and_i64(o->out, o->in1, o->in2);
2690     return DISAS_NEXT;
2691 }
2692 
2693 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2694 {
2695     /* The architectural output is the original value in memory, which
2696        the fetch-and-or leaves in o->in2.  */
2697     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2698                                 s->insn->data | MO_ALIGN);
2699     /* Redo the OR here to compute the CC.  */
2700     tcg_gen_or_i64(o->out, o->in1, o->in2);
2701     return DISAS_NEXT;
2702 }
2703 
2704 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2705 {
2706     /* The architectural output is the original value in memory, which
2707        the fetch-and-xor leaves in o->in2.  */
2708     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2709                                  s->insn->data | MO_ALIGN);
2710     /* Redo the XOR here to compute the CC.  */
2711     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2712     return DISAS_NEXT;
2713 }
2714 
2715 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2716 {
2717     gen_helper_ldeb(o->out, cpu_env, o->in2);
2718     return DISAS_NEXT;
2719 }
2720 
2721 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2722 {
2723     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2724 
2725     if (!m34) {
2726         return DISAS_NORETURN;
2727     }
2728     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2729     return DISAS_NEXT;
2730 }
2731 
2732 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2733 {
2734     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2735 
2736     if (!m34) {
2737         return DISAS_NORETURN;
2738     }
2739     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2740     return DISAS_NEXT;
2741 }
2742 
2743 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2744 {
2745     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2746 
2747     if (!m34) {
2748         return DISAS_NORETURN;
2749     }
2750     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2751     return DISAS_NEXT;
2752 }
2753 
2754 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2755 {
2756     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2757     return DISAS_NEXT;
2758 }
2759 
2760 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2761 {
2762     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2763     return DISAS_NEXT;
2764 }
2765 
2766 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2767 {
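         /* LDE(R): the 32-bit operand occupies the leftmost word of the
            target register.  */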
2768     tcg_gen_shli_i64(o->out, o->in2, 32);
2769     return DISAS_NEXT;
2770 }
2771 
2772 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2773 {
2774     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2775     return DISAS_NEXT;
2776 }
2777 
2778 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2779 {
2780     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2781     return DISAS_NEXT;
2782 }
2783 
2784 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2785 {
2786     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2787     return DISAS_NEXT;
2788 }
2789 
2790 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2791 {
2792     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2793     return DISAS_NEXT;
2794 }
2795 
2796 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2797 {
2798     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2799     return DISAS_NEXT;
2800 }
2801 
2802 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2803 {
2804     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2805                        MO_TESL | s->insn->data);
2806     return DISAS_NEXT;
2807 }
2808 
2809 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2810 {
2811     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2812                        MO_TEUL | s->insn->data);
2813     return DISAS_NEXT;
2814 }
2815 
2816 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2817 {
2818     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2819                         MO_TEUQ | s->insn->data);
2820     return DISAS_NEXT;
2821 }
2822 
2823 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2824 {
2825     TCGLabel *lab = gen_new_label();
2826     store_reg32_i64(get_field(s, r1), o->in2);
2827     /* The value is stored even in case of trap. */
2828     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2829     gen_trap(s);
2830     gen_set_label(lab);
2831     return DISAS_NEXT;
2832 }
2833 
2834 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2835 {
2836     TCGLabel *lab = gen_new_label();
2837     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2838     /* The value is stored even in case of trap. */
2839     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2840     gen_trap(s);
2841     gen_set_label(lab);
2842     return DISAS_NEXT;
2843 }
2844 
2845 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2846 {
2847     TCGLabel *lab = gen_new_label();
2848     store_reg32h_i64(get_field(s, r1), o->in2);
2849     /* The value is stored even in case of trap. */
2850     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2851     gen_trap(s);
2852     gen_set_label(lab);
2853     return DISAS_NEXT;
2854 }
2855 
2856 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2857 {
2858     TCGLabel *lab = gen_new_label();
2859 
2860     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2861     /* The value is stored even in case of trap. */
2862     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2863     gen_trap(s);
2864     gen_set_label(lab);
2865     return DISAS_NEXT;
2866 }
2867 
2868 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2869 {
2870     TCGLabel *lab = gen_new_label();
2871     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2872     /* The value is stored even in case of trap. */
2873     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2874     gen_trap(s);
2875     gen_set_label(lab);
2876     return DISAS_NEXT;
2877 }
2878 
2879 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2880 {
2881     DisasCompare c;
2882 
2883     if (have_field(s, m3)) {
2884         /* LOAD * ON CONDITION */
2885         disas_jcc(s, &c, get_field(s, m3));
2886     } else {
2887         /* SELECT */
2888         disas_jcc(s, &c, get_field(s, m4));
2889     }
2890 
2891     if (c.is_64) {
2892         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2893                             o->in2, o->in1);
2894     } else {
2895         TCGv_i32 t32 = tcg_temp_new_i32();
2896         TCGv_i64 t, z;
2897 
2898         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2899 
2900         t = tcg_temp_new_i64();
2901         tcg_gen_extu_i32_i64(t, t32);
2902 
2903         z = tcg_constant_i64(0);
2904         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2905     }
2906 
2907     return DISAS_NEXT;
2908 }
2909 
2910 #ifndef CONFIG_USER_ONLY
2911 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2912 {
2913     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2914     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2915 
2916     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2917     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2918     s->exit_to_mainloop = true;
2919     return DISAS_TOO_MANY;
2920 }
2921 
2922 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2923 {
2924     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2925     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2926 
2927     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2928     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2929     s->exit_to_mainloop = true;
2930     return DISAS_TOO_MANY;
2931 }
2932 
2933 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2934 {
2935     gen_helper_lra(o->out, cpu_env, o->out, o->in2);
2936     set_cc_static(s);
2937     return DISAS_NEXT;
2938 }
2939 
2940 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2941 {
2942     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2943     return DISAS_NEXT;
2944 }
2945 
2946 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2947 {
2948     TCGv_i64 mask, addr;
2949 
2950     per_breaking_event(s);
2951 
2952     /*
2953      * Convert the short PSW into the normal PSW, similar to what
2954      * s390_cpu_load_normal() does.
2955      */
2956     mask = tcg_temp_new_i64();
2957     addr = tcg_temp_new_i64();
2958     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2959     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2960     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2961     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2962     gen_helper_load_psw(cpu_env, mask, addr);
2963     return DISAS_NORETURN;
2964 }
2965 
2966 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2967 {
2968     TCGv_i64 t1, t2;
2969 
2970     per_breaking_event(s);
2971 
2972     t1 = tcg_temp_new_i64();
2973     t2 = tcg_temp_new_i64();
2974     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2975                         MO_TEUQ | MO_ALIGN_8);
2976     tcg_gen_addi_i64(o->in2, o->in2, 8);
2977     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2978     gen_helper_load_psw(cpu_env, t1, t2);
2979     return DISAS_NORETURN;
2980 }
2981 #endif
2982 
2983 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2984 {
2985     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2986     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2987 
2988     gen_helper_lam(cpu_env, r1, o->in2, r3);
2989     return DISAS_NEXT;
2990 }
2991 
2992 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2993 {
2994     int r1 = get_field(s, r1);
2995     int r3 = get_field(s, r3);
2996     TCGv_i64 t1, t2;
2997 
2998     /* Only one register to read. */
2999     t1 = tcg_temp_new_i64();
3000     if (unlikely(r1 == r3)) {
3001         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3002         store_reg32_i64(r1, t1);
3003         return DISAS_NEXT;
3004     }
3005 
3006     /* First load the values of the first and last registers to trigger
3007        possible page faults. */
3008     t2 = tcg_temp_new_i64();
3009     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3010     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3011     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3012     store_reg32_i64(r1, t1);
3013     store_reg32_i64(r3, t2);
3014 
3015     /* Only two registers to read. */
3016     if (((r1 + 1) & 15) == r3) {
3017         return DISAS_NEXT;
3018     }
3019 
3020     /* Then load the remaining registers; no page fault can occur here.  */
3021     r3 = (r3 - 1) & 15;
3022     tcg_gen_movi_i64(t2, 4);
3023     while (r1 != r3) {
3024         r1 = (r1 + 1) & 15;
3025         tcg_gen_add_i64(o->in2, o->in2, t2);
3026         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3027         store_reg32_i64(r1, t1);
3028     }
3029     return DISAS_NEXT;
3030 }
3031 
3032 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3033 {
3034     int r1 = get_field(s, r1);
3035     int r3 = get_field(s, r3);
3036     TCGv_i64 t1, t2;
3037 
3038     /* Only one register to read. */
3039     t1 = tcg_temp_new_i64();
3040     if (unlikely(r1 == r3)) {
3041         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3042         store_reg32h_i64(r1, t1);
3043         return DISAS_NEXT;
3044     }
3045 
3046     /* First load the values of the first and last registers to trigger
3047        possible page faults. */
3048     t2 = tcg_temp_new_i64();
3049     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3050     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3051     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3052     store_reg32h_i64(r1, t1);
3053     store_reg32h_i64(r3, t2);
3054 
3055     /* Only two registers to read. */
3056     if (((r1 + 1) & 15) == r3) {
3057         return DISAS_NEXT;
3058     }
3059 
3060     /* Then load the remaining registers; no page fault can occur here.  */
3061     r3 = (r3 - 1) & 15;
3062     tcg_gen_movi_i64(t2, 4);
3063     while (r1 != r3) {
3064         r1 = (r1 + 1) & 15;
3065         tcg_gen_add_i64(o->in2, o->in2, t2);
3066         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3067         store_reg32h_i64(r1, t1);
3068     }
3069     return DISAS_NEXT;
3070 }
3071 
3072 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3073 {
3074     int r1 = get_field(s, r1);
3075     int r3 = get_field(s, r3);
3076     TCGv_i64 t1, t2;
3077 
3078     /* Only one register to read. */
3079     if (unlikely(r1 == r3)) {
3080         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3081         return DISAS_NEXT;
3082     }
3083 
3084     /* First load the values of the first and last registers to trigger
3085        possible page faults. */
3086     t1 = tcg_temp_new_i64();
3087     t2 = tcg_temp_new_i64();
3088     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3089     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3090     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3091     tcg_gen_mov_i64(regs[r1], t1);
3092 
3093     /* Only two registers to read. */
3094     if (((r1 + 1) & 15) == r3) {
3095         return DISAS_NEXT;
3096     }
3097 
3098     /* Then load the remaining registers; no page fault can occur here.  */
3099     r3 = (r3 - 1) & 15;
3100     tcg_gen_movi_i64(t1, 8);
3101     while (r1 != r3) {
3102         r1 = (r1 + 1) & 15;
3103         tcg_gen_add_i64(o->in2, o->in2, t1);
3104         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3105     }
3106     return DISAS_NEXT;
3107 }
3108 
3109 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3110 {
3111     TCGv_i64 a1, a2;
3112     MemOp mop = s->insn->data;
3113 
3114     /* In a parallel context, stop the world and single step.  */
3115     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3116         update_psw_addr(s);
3117         update_cc_op(s);
3118         gen_exception(EXCP_ATOMIC);
3119         return DISAS_NORETURN;
3120     }
3121 
3122     /* In a serial context, perform the two loads ... */
3123     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3124     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3125     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3126     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3127 
3128     /* ... and indicate that we performed them while interlocked.  */
3129     gen_op_movi_cc(s, 0);
3130     return DISAS_NEXT;
3131 }
3132 
3133 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3134 {
3135     o->out_128 = tcg_temp_new_i128();
3136     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3137                          MO_TE | MO_128 | MO_ALIGN);
3138     return DISAS_NEXT;
3139 }
3140 
3141 #ifndef CONFIG_USER_ONLY
3142 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3143 {
3144     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3145     return DISAS_NEXT;
3146 }
3147 #endif
3148 
3149 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3150 {
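         /* LOAD AND ZERO RIGHTMOST BYTE: clear the low eight bits.  */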
3151     tcg_gen_andi_i64(o->out, o->in2, -256);
3152     return DISAS_NEXT;
3153 }
3154 
3155 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3156 {
3157     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3158 
3159     if (get_field(s, m3) > 6) {
3160         gen_program_exception(s, PGM_SPECIFICATION);
3161         return DISAS_NORETURN;
3162     }
3163 
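         /* The OR with -block_size followed by negation computes
            block_size - (addr % block_size), i.e. the bytes left to the
            next block boundary; LCBB then caps the result at 16.  */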
3164     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3165     tcg_gen_neg_i64(o->addr1, o->addr1);
3166     tcg_gen_movi_i64(o->out, 16);
3167     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3168     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3169     return DISAS_NEXT;
3170 }
3171 
3172 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3173 {
3174     const uint16_t monitor_class = get_field(s, i2);
3175 
3176     if (monitor_class & 0xff00) {
3177         gen_program_exception(s, PGM_SPECIFICATION);
3178         return DISAS_NORETURN;
3179     }
3180 
3181 #if !defined(CONFIG_USER_ONLY)
3182     gen_helper_monitor_call(cpu_env, o->addr1,
3183                             tcg_constant_i32(monitor_class));
3184 #endif
3185     /* Defaults to a NOP. */
3186     return DISAS_NEXT;
3187 }
3188 
3189 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3190 {
3191     o->out = o->in2;
3192     o->in2 = NULL;
3193     return DISAS_NEXT;
3194 }
3195 
3196 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3197 {
3198     int b2 = get_field(s, b2);
3199     TCGv ar1 = tcg_temp_new_i64();
3200 
3201     o->out = o->in2;
3202     o->in2 = NULL;
3203 
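         /* Derive the new AR1 value from the current address-space
            control: 0 for primary, 1 for access-register mode, 2 for
            home, and a copy of AR b2 (or 0) for secondary.  */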
3204     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3205     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3206         tcg_gen_movi_i64(ar1, 0);
3207         break;
3208     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3209         tcg_gen_movi_i64(ar1, 1);
3210         break;
3211     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3212         if (b2) {
3213             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3214         } else {
3215             tcg_gen_movi_i64(ar1, 0);
3216         }
3217         break;
3218     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3219         tcg_gen_movi_i64(ar1, 2);
3220         break;
3221     }
3222 
3223     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3224     return DISAS_NEXT;
3225 }
3226 
3227 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3228 {
3229     o->out = o->in1;
3230     o->out2 = o->in2;
3231     o->in1 = NULL;
3232     o->in2 = NULL;
3233     return DISAS_NEXT;
3234 }
3235 
3236 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3237 {
3238     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3239 
3240     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3241     return DISAS_NEXT;
3242 }
3243 
3244 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3245 {
3246     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3247     return DISAS_NEXT;
3248 }
3249 
3250 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3251 {
3252     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3253 
3254     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3255     return DISAS_NEXT;
3256 }
3257 
3258 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3259 {
3260     int r1 = get_field(s, r1);
3261     int r2 = get_field(s, r2);
3262     TCGv_i32 t1, t2;
3263 
3264     /* r1 and r2 must be even.  */
3265     if (r1 & 1 || r2 & 1) {
3266         gen_program_exception(s, PGM_SPECIFICATION);
3267         return DISAS_NORETURN;
3268     }
3269 
3270     t1 = tcg_constant_i32(r1);
3271     t2 = tcg_constant_i32(r2);
3272     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3273     set_cc_static(s);
3274     return DISAS_NEXT;
3275 }
3276 
3277 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3278 {
3279     int r1 = get_field(s, r1);
3280     int r3 = get_field(s, r3);
3281     TCGv_i32 t1, t3;
3282 
3283     /* r1 and r3 must be even.  */
3284     if (r1 & 1 || r3 & 1) {
3285         gen_program_exception(s, PGM_SPECIFICATION);
3286         return DISAS_NORETURN;
3287     }
3288 
3289     t1 = tcg_constant_i32(r1);
3290     t3 = tcg_constant_i32(r3);
3291     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3292     set_cc_static(s);
3293     return DISAS_NEXT;
3294 }
3295 
3296 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3297 {
3298     int r1 = get_field(s, r1);
3299     int r3 = get_field(s, r3);
3300     TCGv_i32 t1, t3;
3301 
3302     /* r1 and r3 must be even.  */
3303     if (r1 & 1 || r3 & 1) {
3304         gen_program_exception(s, PGM_SPECIFICATION);
3305         return DISAS_NORETURN;
3306     }
3307 
3308     t1 = tcg_constant_i32(r1);
3309     t3 = tcg_constant_i32(r3);
3310     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3311     set_cc_static(s);
3312     return DISAS_NEXT;
3313 }
3314 
3315 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3316 {
3317     int r3 = get_field(s, r3);
3318     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3319     set_cc_static(s);
3320     return DISAS_NEXT;
3321 }
3322 
3323 #ifndef CONFIG_USER_ONLY
3324 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3325 {
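         /* In this SS format the R1 operand is decoded through the l1
            field.  */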
3326     int r1 = get_field(s, l1);
3327     int r3 = get_field(s, r3);
3328     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3329     set_cc_static(s);
3330     return DISAS_NEXT;
3331 }
3332 
3333 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3334 {
3335     int r1 = get_field(s, l1);
3336     int r3 = get_field(s, r3);
3337     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3338     set_cc_static(s);
3339     return DISAS_NEXT;
3340 }
3341 #endif
3342 
3343 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3344 {
3345     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3346 
3347     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3348     return DISAS_NEXT;
3349 }
3350 
3351 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3352 {
3353     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3354 
3355     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3356     return DISAS_NEXT;
3357 }
3358 
3359 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3360 {
3361     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3362     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3363 
3364     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3365     set_cc_static(s);
3366     return DISAS_NEXT;
3367 }
3368 
3369 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3370 {
3371     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3372     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3373 
3374     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3375     set_cc_static(s);
3376     return DISAS_NEXT;
3377 }
3378 
3379 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3380 {
3381     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3382 
3383     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3384     return DISAS_NEXT;
3385 }
3386 
3387 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3388 {
3389     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3390     return DISAS_NEXT;
3391 }
3392 
3393 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3394 {
3395     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3396     return DISAS_NEXT;
3397 }
3398 
3399 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3400 {
3401     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3402     return DISAS_NEXT;
3403 }
3404 
3405 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3406 {
3407     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3408     return DISAS_NEXT;
3409 }
3410 
3411 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3412 {
3413     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3414     return DISAS_NEXT;
3415 }
3416 
3417 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3418 {
3419     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3420     return DISAS_NEXT;
3421 }
3422 
3423 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3424 {
3425     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3426     return DISAS_NEXT;
3427 }
3428 
3429 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3430 {
3431     gen_helper_mxdb(o->out_128, cpu_env, o->in1, o->in2);
3432     return DISAS_NEXT;
3433 }
3434 
3435 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3436 {
3437     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3438     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3439     return DISAS_NEXT;
3440 }
3441 
3442 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3443 {
3444     TCGv_i64 r3 = load_freg(get_field(s, r3));
3445     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3446     return DISAS_NEXT;
3447 }
3448 
3449 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3450 {
3451     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3452     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3453     return DISAS_NEXT;
3454 }
3455 
3456 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3457 {
3458     TCGv_i64 r3 = load_freg(get_field(s, r3));
3459     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3460     return DISAS_NEXT;
3461 }
3462 
3463 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3464 {
3465     TCGv_i64 z = tcg_constant_i64(0);
3466     TCGv_i64 n = tcg_temp_new_i64();
3467 
3468     tcg_gen_neg_i64(n, o->in2);
3469     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3470     return DISAS_NEXT;
3471 }
3472 
3473 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3474 {
3475     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3476     return DISAS_NEXT;
3477 }
3478 
3479 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3480 {
3481     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3482     return DISAS_NEXT;
3483 }
3484 
3485 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3486 {
3487     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3488     tcg_gen_mov_i64(o->out2, o->in2);
3489     return DISAS_NEXT;
3490 }
3491 
3492 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3493 {
3494     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3495 
3496     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3497     set_cc_static(s);
3498     return DISAS_NEXT;
3499 }
3500 
3501 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3502 {
3503     tcg_gen_neg_i64(o->out, o->in2);
3504     return DISAS_NEXT;
3505 }
3506 
3507 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3508 {
3509     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3510     return DISAS_NEXT;
3511 }
3512 
3513 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3514 {
3515     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3516     return DISAS_NEXT;
3517 }
3518 
3519 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3520 {
3521     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3522     tcg_gen_mov_i64(o->out2, o->in2);
3523     return DISAS_NEXT;
3524 }
3525 
3526 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3527 {
3528     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3529 
3530     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3531     set_cc_static(s);
3532     return DISAS_NEXT;
3533 }
3534 
3535 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3536 {
3537     tcg_gen_or_i64(o->out, o->in1, o->in2);
3538     return DISAS_NEXT;
3539 }
3540 
3541 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3542 {
3543     int shift = s->insn->data & 0xff;
3544     int size = s->insn->data >> 8;
3545     uint64_t mask = ((1ull << size) - 1) << shift;
3546     TCGv_i64 t = tcg_temp_new_i64();
3547 
3548     tcg_gen_shli_i64(t, o->in2, shift);
3549     tcg_gen_or_i64(o->out, o->in1, t);
3550 
3551     /* Produce the CC from only the bits manipulated.  */
3552     tcg_gen_andi_i64(cc_dst, o->out, mask);
3553     set_cc_nz_u64(s, cc_dst);
3554     return DISAS_NEXT;
3555 }
3556 
3557 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3558 {
3559     o->in1 = tcg_temp_new_i64();
3560 
3561     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3562         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3563     } else {
3564         /* Perform the atomic operation in memory. */
3565         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3566                                     s->insn->data);
3567     }
3568 
3569     /* Recompute for the atomic case as well; the result is needed for the CC. */
3570     tcg_gen_or_i64(o->out, o->in1, o->in2);
3571 
3572     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3573         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3574     }
3575     return DISAS_NEXT;
3576 }
3577 
3578 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3579 {
3580     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3581 
3582     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3583     return DISAS_NEXT;
3584 }
3585 
3586 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3587 {
3588     int l2 = get_field(s, l2) + 1;
3589     TCGv_i32 l;
3590 
3591     /* The length must not exceed 32 bytes.  */
3592     if (l2 > 32) {
3593         gen_program_exception(s, PGM_SPECIFICATION);
3594         return DISAS_NORETURN;
3595     }
3596     l = tcg_constant_i32(l2);
3597     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3598     return DISAS_NEXT;
3599 }
3600 
3601 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3602 {
3603     int l2 = get_field(s, l2) + 1;
3604     TCGv_i32 l;
3605 
3606     /* The length must be even and must not exceed 64 bytes.  */
3607     if ((l2 & 1) || (l2 > 64)) {
3608         gen_program_exception(s, PGM_SPECIFICATION);
3609         return DISAS_NORETURN;
3610     }
3611     l = tcg_constant_i32(l2);
3612     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3613     return DISAS_NEXT;
3614 }
3615 
3616 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3617 {
3618     const uint8_t m3 = get_field(s, m3);
3619 
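         /* With the miscellaneous-instruction-extensions facility 3, M3 bit 0
            requests one population count of the entire doubleword; otherwise
            the helper counts the one bits in each byte separately. */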
3620     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3621         tcg_gen_ctpop_i64(o->out, o->in2);
3622     } else {
3623         gen_helper_popcnt(o->out, o->in2);
3624     }
3625     return DISAS_NEXT;
3626 }
3627 
3628 #ifndef CONFIG_USER_ONLY
3629 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3630 {
3631     gen_helper_ptlb(cpu_env);
3632     return DISAS_NEXT;
3633 }
3634 #endif
3635 
3636 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3637 {
3638     int i3 = get_field(s, i3);
3639     int i4 = get_field(s, i4);
3640     int i5 = get_field(s, i5);
3641     int do_zero = i4 & 0x80;
3642     uint64_t mask, imask, pmask;
3643     int pos, len, rot;
3644 
3645     /* Adjust the arguments for the specific insn.  */
3646     switch (s->fields.op2) {
3647     case 0x55: /* risbg */
3648     case 0x59: /* risbgn */
3649         i3 &= 63;
3650         i4 &= 63;
3651         pmask = ~0;
3652         break;
3653     case 0x5d: /* risbhg */
3654         i3 &= 31;
3655         i4 &= 31;
3656         pmask = 0xffffffff00000000ull;
3657         break;
3658     case 0x51: /* risblg */
3659         i3 = (i3 & 31) + 32;
3660         i4 = (i4 & 31) + 32;
3661         pmask = 0x00000000ffffffffull;
3662         break;
3663     default:
3664         g_assert_not_reached();
3665     }
3666 
3667     /* MASK is the set of bits to be inserted from R2. */
3668     if (i3 <= i4) {
3669         /* [0...i3---i4...63] */
3670         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3671     } else {
3672         /* [0---i4...i3---63] */
3673         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3674     }
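         /* e.g. i3 = 40, i4 = 47 selects byte 5: mask = 0x0000000000ff0000ull;
            the wrapped range i3 = 60, i4 = 3 gives 0xf00000000000000full. */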
3675     /* For RISBLG/RISBHG, the wrapping is limited to the low/high word. */
3676     mask &= pmask;
3677 
3678     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3679        insns, we need to keep the other half of the register.  */
3680     imask = ~mask | ~pmask;
3681     if (do_zero) {
3682         imask = ~pmask;
3683     }
3684 
3685     len = i4 - i3 + 1;
3686     pos = 63 - i4;
3687     rot = i5 & 63;
3688 
3689     /* In some cases we can implement this with extract.  */
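         /* With nothing kept from R1 (imask == 0) and the field ending at bit
            63 (pos == 0), the field starts at unrotated bit 64 - rot, and
            len <= rot keeps it from wrapping, so one extract suffices. */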
3690     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3691         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3692         return DISAS_NEXT;
3693     }
3694 
3695     /* In some cases we can implement this with deposit.  */
3696     if (len > 0 && (imask == 0 || ~mask == imask)) {
3697         /* Note that we rotate the bits to be inserted to the lsb, not to
3698            the position as described in the PoO.  */
3699         rot = (rot - pos) & 63;
3700     } else {
3701         pos = -1;
3702     }
3703 
3704     /* Rotate the input as necessary.  */
3705     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3706 
3707     /* Insert the selected bits into the output.  */
3708     if (pos >= 0) {
3709         if (imask == 0) {
3710             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3711         } else {
3712             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3713         }
3714     } else if (imask == 0) {
3715         tcg_gen_andi_i64(o->out, o->in2, mask);
3716     } else {
3717         tcg_gen_andi_i64(o->in2, o->in2, mask);
3718         tcg_gen_andi_i64(o->out, o->out, imask);
3719         tcg_gen_or_i64(o->out, o->out, o->in2);
3720     }
3721     return DISAS_NEXT;
3722 }
3723 
3724 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3725 {
3726     int i3 = get_field(s, i3);
3727     int i4 = get_field(s, i4);
3728     int i5 = get_field(s, i5);
3729     TCGv_i64 orig_out;
3730     uint64_t mask;
3731 
3732     /* If this is a test-only form, arrange to discard the result.  */
3733     if (i3 & 0x80) {
3734         tcg_debug_assert(o->out != NULL);
3735         orig_out = o->out;
3736         o->out = tcg_temp_new_i64();
3737         tcg_gen_mov_i64(o->out, orig_out);
3738     }
3739 
3740     i3 &= 63;
3741     i4 &= 63;
3742     i5 &= 63;
3743 
3744     /* MASK is the set of bits to be operated on from R2.
3745        Take care for I3/I4 wraparound.  */
3746     mask = ~0ull >> i3;
3747     if (i3 <= i4) {
3748         mask ^= ~0ull >> i4 >> 1;
3749     } else {
3750         mask |= ~(~0ull >> i4 >> 1);
3751     }
3752 
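         /* e.g. i3 = 8, i4 = 15 yields mask = 0x00ff000000000000ull. */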
3753     /* Rotate the input as necessary.  */
3754     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3755 
3756     /* Operate.  */
3757     switch (s->fields.op2) {
3758     case 0x54: /* AND */
3759         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3760         tcg_gen_and_i64(o->out, o->out, o->in2);
3761         break;
3762     case 0x56: /* OR */
3763         tcg_gen_andi_i64(o->in2, o->in2, mask);
3764         tcg_gen_or_i64(o->out, o->out, o->in2);
3765         break;
3766     case 0x57: /* XOR */
3767         tcg_gen_andi_i64(o->in2, o->in2, mask);
3768         tcg_gen_xor_i64(o->out, o->out, o->in2);
3769         break;
3770     default:
3771         abort();
3772     }
3773 
3774     /* Set the CC.  */
3775     tcg_gen_andi_i64(cc_dst, o->out, mask);
3776     set_cc_nz_u64(s, cc_dst);
3777     return DISAS_NEXT;
3778 }
3779 
3780 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3781 {
3782     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3783     return DISAS_NEXT;
3784 }
3785 
3786 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3787 {
3788     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3789     return DISAS_NEXT;
3790 }
3791 
3792 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3793 {
3794     tcg_gen_bswap64_i64(o->out, o->in2);
3795     return DISAS_NEXT;
3796 }
3797 
3798 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3799 {
3800     TCGv_i32 t1 = tcg_temp_new_i32();
3801     TCGv_i32 t2 = tcg_temp_new_i32();
3802     TCGv_i32 to = tcg_temp_new_i32();
3803     tcg_gen_extrl_i64_i32(t1, o->in1);
3804     tcg_gen_extrl_i64_i32(t2, o->in2);
3805     tcg_gen_rotl_i32(to, t1, t2);
3806     tcg_gen_extu_i32_i64(o->out, to);
3807     return DISAS_NEXT;
3808 }
3809 
3810 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3811 {
3812     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3813     return DISAS_NEXT;
3814 }
3815 
3816 #ifndef CONFIG_USER_ONLY
3817 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3818 {
3819     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3820     set_cc_static(s);
3821     return DISAS_NEXT;
3822 }
3823 
3824 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3825 {
3826     gen_helper_sacf(cpu_env, o->in2);
3827     /* Addressing mode has changed, so end the block.  */
3828     return DISAS_TOO_MANY;
3829 }
3830 #endif
3831 
3832 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3833 {
3834     int sam = s->insn->data;
3835     TCGv_i64 tsam;
3836     uint64_t mask;
3837 
3838     switch (sam) {
3839     case 0:
3840         mask = 0xffffff;
3841         break;
3842     case 1:
3843         mask = 0x7fffffff;
3844         break;
3845     default:
3846         mask = -1;
3847         break;
3848     }
3849 
3850     /* Bizarre but true, we check the address of the current insn for the
3851        specification exception, not the next to be executed.  Thus the PoO
3852        documents that Bad Things Happen two bytes before the end.  */
3853     if (s->base.pc_next & ~mask) {
3854         gen_program_exception(s, PGM_SPECIFICATION);
3855         return DISAS_NORETURN;
3856     }
3857     s->pc_tmp &= mask;
3858 
3859     tsam = tcg_constant_i64(sam);
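         /* The 2-bit SAM code goes into PSW bits 31 (EA) and 32 (BA),
            which are lsb bits 32 and 31 of psw_mask. */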
3860     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3861 
3862     /* Always exit the TB, since we (may have) changed execution mode.  */
3863     return DISAS_TOO_MANY;
3864 }
3865 
3866 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3867 {
3868     int r1 = get_field(s, r1);
3869     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3870     return DISAS_NEXT;
3871 }
3872 
3873 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3874 {
3875     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3876     return DISAS_NEXT;
3877 }
3878 
3879 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3880 {
3881     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3882     return DISAS_NEXT;
3883 }
3884 
3885 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3886 {
3887     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3888     return DISAS_NEXT;
3889 }
3890 
3891 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3892 {
3893     gen_helper_sqeb(o->out, cpu_env, o->in2);
3894     return DISAS_NEXT;
3895 }
3896 
3897 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3898 {
3899     gen_helper_sqdb(o->out, cpu_env, o->in2);
3900     return DISAS_NEXT;
3901 }
3902 
3903 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3904 {
3905     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3906     return DISAS_NEXT;
3907 }
3908 
3909 #ifndef CONFIG_USER_ONLY
3910 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3911 {
3912     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3913     set_cc_static(s);
3914     return DISAS_NEXT;
3915 }
3916 
3917 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3918 {
3919     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3920     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3921 
3922     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3923     set_cc_static(s);
3924     return DISAS_NEXT;
3925 }
3926 #endif
3927 
3928 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3929 {
3930     DisasCompare c;
3931     TCGv_i64 a, h;
3932     TCGLabel *lab;
3933     int r1;
3934 
3935     disas_jcc(s, &c, get_field(s, m3));
3936 
3937     /* We want to store when the condition is fulfilled, so branch
3938        out when it is not.  */
3939     c.cond = tcg_invert_cond(c.cond);
3940 
3941     lab = gen_new_label();
3942     if (c.is_64) {
3943         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3944     } else {
3945         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3946     }
3947 
3948     r1 = get_field(s, r1);
3949     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3950     switch (s->insn->data) {
3951     case 1: /* STOCG */
3952         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3953         break;
3954     case 0: /* STOC */
3955         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3956         break;
3957     case 2: /* STOCFH */
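             /* STOCFH stores the high word, bits 0-31, of r1. */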
3958         h = tcg_temp_new_i64();
3959         tcg_gen_shri_i64(h, regs[r1], 32);
3960         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3961         break;
3962     default:
3963         g_assert_not_reached();
3964     }
3965 
3966     gen_set_label(lab);
3967     return DISAS_NEXT;
3968 }
3969 
3970 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3971 {
3972     TCGv_i64 t;
3973     uint64_t sign = 1ull << s->insn->data;
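         /* For the 32-bit forms (data == 31), shift the operand into the
            high half so that CC_OP_SLA sees the sign bit at bit 63. */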
3974     if (s->insn->data == 31) {
3975         t = tcg_temp_new_i64();
3976         tcg_gen_shli_i64(t, o->in1, 32);
3977     } else {
3978         t = o->in1;
3979     }
3980     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3981     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3982     /* The arithmetic left shift is curious in that it does not affect
3983        the sign bit.  Copy that over from the source unchanged.  */
3984     tcg_gen_andi_i64(o->out, o->out, ~sign);
3985     tcg_gen_andi_i64(o->in1, o->in1, sign);
3986     tcg_gen_or_i64(o->out, o->out, o->in1);
3987     return DISAS_NEXT;
3988 }
3989 
3990 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3991 {
3992     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3993     return DISAS_NEXT;
3994 }
3995 
3996 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3997 {
3998     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3999     return DISAS_NEXT;
4000 }
4001 
4002 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4003 {
4004     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4005     return DISAS_NEXT;
4006 }
4007 
4008 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4009 {
4010     gen_helper_sfpc(cpu_env, o->in2);
4011     return DISAS_NEXT;
4012 }
4013 
4014 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4015 {
4016     gen_helper_sfas(cpu_env, o->in2);
4017     return DISAS_NEXT;
4018 }
4019 
4020 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4021 {
4022     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4023     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4024     gen_helper_srnm(cpu_env, o->addr1);
4025     return DISAS_NEXT;
4026 }
4027 
4028 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4029 {
4030     /* Bits 0-55 are ignored. */
4031     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4032     gen_helper_srnm(cpu_env, o->addr1);
4033     return DISAS_NEXT;
4034 }
4035 
4036 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4037 {
4038     TCGv_i64 tmp = tcg_temp_new_i64();
4039 
4040     /* Bits other than 61-63 are ignored. */
4041     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4042 
4043     /* No need to call a helper; we don't implement DFP. */
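         /* The rounding mode goes into the FPC DRM field, bits 25-27
            (lsb bits 4-6). */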
4044     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4045     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4046     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4047     return DISAS_NEXT;
4048 }
4049 
4050 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4051 {
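         /* Bits 2-3 of r1 (lsb bits 28-29) become the CC; bits 4-7
            (lsb bits 24-27) replace the program mask in the PSW. */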
4052     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4053     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4054     set_cc_static(s);
4055 
4056     tcg_gen_shri_i64(o->in1, o->in1, 24);
4057     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4058     return DISAS_NEXT;
4059 }
4060 
4061 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4062 {
4063     int b1 = get_field(s, b1);
4064     int d1 = get_field(s, d1);
4065     int b2 = get_field(s, b2);
4066     int d2 = get_field(s, d2);
4067     int r3 = get_field(s, r3);
4068     TCGv_i64 tmp = tcg_temp_new_i64();
4069 
4070     /* fetch all operands first */
4071     o->in1 = tcg_temp_new_i64();
4072     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4073     o->in2 = tcg_temp_new_i64();
4074     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4075     o->addr1 = tcg_temp_new_i64();
4076     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4077 
4078     /* load the third operand into r3 before modifying anything */
4079     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4080 
4081     /* subtract CPU timer from first operand and store in GR0 */
4082     gen_helper_stpt(tmp, cpu_env);
4083     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4084 
4085     /* store second operand in GR1 */
4086     tcg_gen_mov_i64(regs[1], o->in2);
4087     return DISAS_NEXT;
4088 }
4089 
4090 #ifndef CONFIG_USER_ONLY
4091 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4092 {
4093     tcg_gen_shri_i64(o->in2, o->in2, 4);
4094     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4095     return DISAS_NEXT;
4096 }
4097 
4098 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4099 {
4100     gen_helper_sske(cpu_env, o->in1, o->in2);
4101     return DISAS_NEXT;
4102 }
4103 
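     /* Raise a specification exception if a reserved PSW mask bit is set. */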
4104 static void gen_check_psw_mask(DisasContext *s)
4105 {
4106     TCGv_i64 reserved = tcg_temp_new_i64();
4107     TCGLabel *ok = gen_new_label();
4108 
4109     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4110     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4111     gen_program_exception(s, PGM_SPECIFICATION);
4112     gen_set_label(ok);
4113 }
4114 
4115 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4116 {
4117     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4118 
4119     gen_check_psw_mask(s);
4120 
4121     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4122     s->exit_to_mainloop = true;
4123     return DISAS_TOO_MANY;
4124 }
4125 
4126 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4127 {
4128     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4129     return DISAS_NEXT;
4130 }
4131 #endif
4132 
4133 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4134 {
4135     gen_helper_stck(o->out, cpu_env);
4136     /* ??? We don't implement clock states.  */
4137     gen_op_movi_cc(s, 0);
4138     return DISAS_NEXT;
4139 }
4140 
4141 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4142 {
4143     TCGv_i64 c1 = tcg_temp_new_i64();
4144     TCGv_i64 c2 = tcg_temp_new_i64();
4145     TCGv_i64 todpr = tcg_temp_new_i64();
4146     gen_helper_stck(c1, cpu_env);
4147     /* 16-bit value stored in a uint32_t (only the valid bits are set) */
4148     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4149     /* Shift the 64-bit value into its place as a zero-extended
4150        104-bit value.  Note that "bit positions 64-103 are always
4151        non-zero so that they compare differently to STCK"; we set
4152        the least significant bit to 1.  */
4153     tcg_gen_shli_i64(c2, c1, 56);
4154     tcg_gen_shri_i64(c1, c1, 8);
4155     tcg_gen_ori_i64(c2, c2, 0x10000);
4156     tcg_gen_or_i64(c2, c2, todpr);
4157     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4158     tcg_gen_addi_i64(o->in2, o->in2, 8);
4159     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4160     /* ??? We don't implement clock states.  */
4161     gen_op_movi_cc(s, 0);
4162     return DISAS_NEXT;
4163 }
4164 
4165 #ifndef CONFIG_USER_ONLY
4166 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4167 {
4168     gen_helper_sck(cc_op, cpu_env, o->in2);
4169     set_cc_static(s);
4170     return DISAS_NEXT;
4171 }
4172 
4173 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4174 {
4175     gen_helper_sckc(cpu_env, o->in2);
4176     return DISAS_NEXT;
4177 }
4178 
4179 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4180 {
4181     gen_helper_sckpf(cpu_env, regs[0]);
4182     return DISAS_NEXT;
4183 }
4184 
4185 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4186 {
4187     gen_helper_stckc(o->out, cpu_env);
4188     return DISAS_NEXT;
4189 }
4190 
4191 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4192 {
4193     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4194     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4195 
4196     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4197     return DISAS_NEXT;
4198 }
4199 
4200 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4201 {
4202     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4203     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4204 
4205     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4206     return DISAS_NEXT;
4207 }
4208 
4209 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4210 {
4211     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4212     return DISAS_NEXT;
4213 }
4214 
4215 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4216 {
4217     gen_helper_spt(cpu_env, o->in2);
4218     return DISAS_NEXT;
4219 }
4220 
4221 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4222 {
4223     gen_helper_stfl(cpu_env);
4224     return DISAS_NEXT;
4225 }
4226 
4227 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4228 {
4229     gen_helper_stpt(o->out, cpu_env);
4230     return DISAS_NEXT;
4231 }
4232 
4233 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4234 {
4235     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4236     set_cc_static(s);
4237     return DISAS_NEXT;
4238 }
4239 
4240 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4241 {
4242     gen_helper_spx(cpu_env, o->in2);
4243     return DISAS_NEXT;
4244 }
4245 
4246 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4247 {
4248     gen_helper_xsch(cpu_env, regs[1]);
4249     set_cc_static(s);
4250     return DISAS_NEXT;
4251 }
4252 
4253 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4254 {
4255     gen_helper_csch(cpu_env, regs[1]);
4256     set_cc_static(s);
4257     return DISAS_NEXT;
4258 }
4259 
4260 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4261 {
4262     gen_helper_hsch(cpu_env, regs[1]);
4263     set_cc_static(s);
4264     return DISAS_NEXT;
4265 }
4266 
4267 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4268 {
4269     gen_helper_msch(cpu_env, regs[1], o->in2);
4270     set_cc_static(s);
4271     return DISAS_NEXT;
4272 }
4273 
4274 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4275 {
4276     gen_helper_rchp(cpu_env, regs[1]);
4277     set_cc_static(s);
4278     return DISAS_NEXT;
4279 }
4280 
4281 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4282 {
4283     gen_helper_rsch(cpu_env, regs[1]);
4284     set_cc_static(s);
4285     return DISAS_NEXT;
4286 }
4287 
4288 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4289 {
4290     gen_helper_sal(cpu_env, regs[1]);
4291     return DISAS_NEXT;
4292 }
4293 
4294 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4295 {
4296     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4297     return DISAS_NEXT;
4298 }
4299 
4300 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4301 {
4302     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4303     gen_op_movi_cc(s, 3);
4304     return DISAS_NEXT;
4305 }
4306 
4307 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4308 {
4309     /* The instruction is suppressed if not provided. */
4310     return DISAS_NEXT;
4311 }
4312 
4313 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4314 {
4315     gen_helper_ssch(cpu_env, regs[1], o->in2);
4316     set_cc_static(s);
4317     return DISAS_NEXT;
4318 }
4319 
4320 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4321 {
4322     gen_helper_stsch(cpu_env, regs[1], o->in2);
4323     set_cc_static(s);
4324     return DISAS_NEXT;
4325 }
4326 
4327 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4328 {
4329     gen_helper_stcrw(cpu_env, o->in2);
4330     set_cc_static(s);
4331     return DISAS_NEXT;
4332 }
4333 
4334 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4335 {
4336     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4337     set_cc_static(s);
4338     return DISAS_NEXT;
4339 }
4340 
4341 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4342 {
4343     gen_helper_tsch(cpu_env, regs[1], o->in2);
4344     set_cc_static(s);
4345     return DISAS_NEXT;
4346 }
4347 
4348 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4349 {
4350     gen_helper_chsc(cpu_env, o->in2);
4351     set_cc_static(s);
4352     return DISAS_NEXT;
4353 }
4354 
4355 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4356 {
4357     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4358     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4359     return DISAS_NEXT;
4360 }
4361 
4362 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4363 {
4364     uint64_t i2 = get_field(s, i2);
4365     TCGv_i64 t;
4366 
4367     /* It is important to do what the instruction name says: STORE THEN.
4368        If we let the output hook perform the store, then if we fault and
4369        restart, we'll have the wrong SYSTEM MASK in place.  */
4370     t = tcg_temp_new_i64();
4371     tcg_gen_shri_i64(t, psw_mask, 56);
4372     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4373 
4374     if (s->fields.op == 0xac) {
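         /* Op 0xac is STNSM (AND the mask in); 0xad is STOSM (OR it in). */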
4375         tcg_gen_andi_i64(psw_mask, psw_mask,
4376                          (i2 << 56) | 0x00ffffffffffffffull);
4377     } else {
4378         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4379     }
4380 
4381     gen_check_psw_mask(s);
4382 
4383     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4384     s->exit_to_mainloop = true;
4385     return DISAS_TOO_MANY;
4386 }
4387 
4388 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4389 {
4390     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4391 
4392     if (s->base.tb->flags & FLAG_MASK_PER) {
4393         update_psw_addr(s);
4394         gen_helper_per_store_real(cpu_env);
4395     }
4396     return DISAS_NEXT;
4397 }
4398 #endif
4399 
4400 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4401 {
4402     gen_helper_stfle(cc_op, cpu_env, o->in2);
4403     set_cc_static(s);
4404     return DISAS_NEXT;
4405 }
4406 
4407 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4408 {
4409     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4410     return DISAS_NEXT;
4411 }
4412 
4413 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4414 {
4415     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4416     return DISAS_NEXT;
4417 }
4418 
4419 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4420 {
4421     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4422                        MO_TEUL | s->insn->data);
4423     return DISAS_NEXT;
4424 }
4425 
4426 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4427 {
4428     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4429                         MO_TEUQ | s->insn->data);
4430     return DISAS_NEXT;
4431 }
4432 
4433 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4434 {
4435     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4436     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4437 
4438     gen_helper_stam(cpu_env, r1, o->in2, r3);
4439     return DISAS_NEXT;
4440 }
4441 
4442 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4443 {
4444     int m3 = get_field(s, m3);
4445     int pos, base = s->insn->data;
4446     TCGv_i64 tmp = tcg_temp_new_i64();
4447 
4448     pos = base + ctz32(m3) * 8;
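         /* For a contiguous mask, the lowest selected byte fixes the shift;
            base comes from the insn data (32 for the high-word variant). */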
4449     switch (m3) {
4450     case 0xf:
4451         /* Effectively a 32-bit store.  */
4452         tcg_gen_shri_i64(tmp, o->in1, pos);
4453         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4454         break;
4455 
4456     case 0xc:
4457     case 0x6:
4458     case 0x3:
4459         /* Effectively a 16-bit store.  */
4460         tcg_gen_shri_i64(tmp, o->in1, pos);
4461         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4462         break;
4463 
4464     case 0x8:
4465     case 0x4:
4466     case 0x2:
4467     case 0x1:
4468         /* Effectively an 8-bit store.  */
4469         tcg_gen_shri_i64(tmp, o->in1, pos);
4470         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4471         break;
4472 
4473     default:
4474         /* This is going to be a sequence of shifts and stores.  */
4475         pos = base + 32 - 8;
4476         while (m3) {
4477             if (m3 & 0x8) {
4478                 tcg_gen_shri_i64(tmp, o->in1, pos);
4479                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4480                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4481             }
4482             m3 = (m3 << 1) & 0xf;
4483             pos -= 8;
4484         }
4485         break;
4486     }
4487     return DISAS_NEXT;
4488 }
4489 
4490 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4491 {
4492     int r1 = get_field(s, r1);
4493     int r3 = get_field(s, r3);
4494     int size = s->insn->data;
4495     TCGv_i64 tsize = tcg_constant_i64(size);
4496 
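         /* As architected, the r1..r3 register range wraps from 15 to 0. */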
4497     while (1) {
4498         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4499                             size == 8 ? MO_TEUQ : MO_TEUL);
4500         if (r1 == r3) {
4501             break;
4502         }
4503         tcg_gen_add_i64(o->in2, o->in2, tsize);
4504         r1 = (r1 + 1) & 15;
4505     }
4506 
4507     return DISAS_NEXT;
4508 }
4509 
4510 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4511 {
4512     int r1 = get_field(s, r1);
4513     int r3 = get_field(s, r3);
4514     TCGv_i64 t = tcg_temp_new_i64();
4515     TCGv_i64 t4 = tcg_constant_i64(4);
4516     TCGv_i64 t32 = tcg_constant_i64(32);
4517 
4518     while (1) {
4519         tcg_gen_shl_i64(t, regs[r1], t32);
4520         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4521         if (r1 == r3) {
4522             break;
4523         }
4524         tcg_gen_add_i64(o->in2, o->in2, t4);
4525         r1 = (r1 + 1) & 15;
4526     }
4527     return DISAS_NEXT;
4528 }
4529 
4530 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4531 {
4532     TCGv_i128 t16 = tcg_temp_new_i128();
4533 
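         /* out is the even register and supplies the high doubleword; the
            big-endian 128-bit store writes it at the lower address. */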
4534     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4535     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4536                          MO_TE | MO_128 | MO_ALIGN);
4537     return DISAS_NEXT;
4538 }
4539 
4540 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4541 {
4542     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4543     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4544 
4545     gen_helper_srst(cpu_env, r1, r2);
4546     set_cc_static(s);
4547     return DISAS_NEXT;
4548 }
4549 
4550 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4551 {
4552     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4553     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4554 
4555     gen_helper_srstu(cpu_env, r1, r2);
4556     set_cc_static(s);
4557     return DISAS_NEXT;
4558 }
4559 
4560 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4561 {
4562     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4563     return DISAS_NEXT;
4564 }
4565 
4566 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4567 {
4568     tcg_gen_movi_i64(cc_src, 0);
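         /* 128-bit {0:in1} - {0:in2}: the high half of the difference
            leaves the borrow, 0 or -1, in cc_src for CC_OP_SUBU. */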
4569     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4570     return DISAS_NEXT;
4571 }
4572 
4573 /* Compute borrow (0, -1) into cc_src. */
4574 static void compute_borrow(DisasContext *s)
4575 {
4576     switch (s->cc_op) {
4577     case CC_OP_SUBU:
4578         /* The borrow value is already in cc_src (0,-1). */
4579         break;
4580     default:
4581         gen_op_calc_cc(s);
4582         /* fall through */
4583     case CC_OP_STATIC:
4584         /* The carry flag is the msb of CC; compute into cc_src. */
4585         tcg_gen_extu_i32_i64(cc_src, cc_op);
4586         tcg_gen_shri_i64(cc_src, cc_src, 1);
4587         /* fall through */
4588     case CC_OP_ADDU:
4589         /* Convert carry (1,0) to borrow (0,-1). */
4590         tcg_gen_subi_i64(cc_src, cc_src, 1);
4591         break;
4592     }
4593 }
4594 
4595 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4596 {
4597     compute_borrow(s);
4598 
4599     /* Borrow is {0, -1}, so add to subtract. */
4600     tcg_gen_add_i64(o->out, o->in1, cc_src);
4601     tcg_gen_sub_i64(o->out, o->out, o->in2);
4602     return DISAS_NEXT;
4603 }
4604 
4605 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4606 {
4607     compute_borrow(s);
4608 
4609     /*
4610      * Borrow is {0, -1}, so add to subtract; replicate the
4611      * borrow input to produce 128-bit -1 for the addition.
4612      */
4613     TCGv_i64 zero = tcg_constant_i64(0);
4614     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4615     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4616 
4617     return DISAS_NEXT;
4618 }
4619 
4620 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4621 {
4622     TCGv_i32 t;
4623 
4624     update_psw_addr(s);
4625     update_cc_op(s);
4626 
4627     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4628     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4629 
4630     t = tcg_constant_i32(s->ilen);
4631     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4632 
4633     gen_exception(EXCP_SVC);
4634     return DISAS_NORETURN;
4635 }
4636 
4637 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4638 {
4639     int cc = 0;
4640 
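         /* CC encodes the addressing mode: 0 = 24-bit, 1 = 31-bit, 3 = 64-bit. */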
4641     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4642     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4643     gen_op_movi_cc(s, cc);
4644     return DISAS_NEXT;
4645 }
4646 
4647 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4648 {
4649     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4650     set_cc_static(s);
4651     return DISAS_NEXT;
4652 }
4653 
4654 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4655 {
4656     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4657     set_cc_static(s);
4658     return DISAS_NEXT;
4659 }
4660 
4661 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4662 {
4663     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4664     set_cc_static(s);
4665     return DISAS_NEXT;
4666 }
4667 
4668 #ifndef CONFIG_USER_ONLY
4669 
4670 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4671 {
4672     gen_helper_testblock(cc_op, cpu_env, o->in2);
4673     set_cc_static(s);
4674     return DISAS_NEXT;
4675 }
4676 
4677 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4678 {
4679     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4680     set_cc_static(s);
4681     return DISAS_NEXT;
4682 }
4683 
4684 #endif
4685 
4686 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4687 {
4688     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4689 
4690     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4691     set_cc_static(s);
4692     return DISAS_NEXT;
4693 }
4694 
4695 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4696 {
4697     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4698 
4699     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4700     set_cc_static(s);
4701     return DISAS_NEXT;
4702 }
4703 
4704 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4705 {
4706     TCGv_i128 pair = tcg_temp_new_i128();
4707 
4708     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4709     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4710     set_cc_static(s);
4711     return DISAS_NEXT;
4712 }
4713 
4714 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4715 {
4716     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4717 
4718     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4719     set_cc_static(s);
4720     return DISAS_NEXT;
4721 }
4722 
4723 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4724 {
4725     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4726 
4727     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4728     set_cc_static(s);
4729     return DISAS_NEXT;
4730 }
4731 
4732 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4733 {
4734     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4735     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4736     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4737     TCGv_i32 tst = tcg_temp_new_i32();
4738     int m3 = get_field(s, m3);
4739 
4740     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4741         m3 = 0;
4742     }
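         /* The low M3 bit (meaningful only with ETF2-ENH) disables the
            test-character comparison; -1 tells the helper so.  Otherwise
            the test character comes from the low 8 or 16 bits of GR0,
            depending on the insn. */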
4743     if (m3 & 1) {
4744         tcg_gen_movi_i32(tst, -1);
4745     } else {
4746         tcg_gen_extrl_i64_i32(tst, regs[0]);
4747         if (s->insn->opc & 3) {
4748             tcg_gen_ext8u_i32(tst, tst);
4749         } else {
4750             tcg_gen_ext16u_i32(tst, tst);
4751         }
4752     }
4753     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4754 
4755     set_cc_static(s);
4756     return DISAS_NEXT;
4757 }
4758 
4759 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4760 {
4761     TCGv_i32 t1 = tcg_constant_i32(0xff);
4762 
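         /* Atomically replace the byte with all ones; the CC is the former
            leftmost bit of that byte. */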
4763     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4764     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4765     set_cc_static(s);
4766     return DISAS_NEXT;
4767 }
4768 
4769 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4770 {
4771     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4772 
4773     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4774     return DISAS_NEXT;
4775 }
4776 
4777 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4778 {
4779     int l1 = get_field(s, l1) + 1;
4780     TCGv_i32 l;
4781 
4782     /* The length must not exceed 32 bytes.  */
4783     if (l1 > 32) {
4784         gen_program_exception(s, PGM_SPECIFICATION);
4785         return DISAS_NORETURN;
4786     }
4787     l = tcg_constant_i32(l1);
4788     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4789     set_cc_static(s);
4790     return DISAS_NEXT;
4791 }
4792 
4793 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4794 {
4795     int l1 = get_field(s, l1) + 1;
4796     TCGv_i32 l;
4797 
4798     /* The length must be even and must not exceed 64 bytes.  */
4799     if ((l1 & 1) || (l1 > 64)) {
4800         gen_program_exception(s, PGM_SPECIFICATION);
4801         return DISAS_NORETURN;
4802     }
4803     l = tcg_constant_i32(l1);
4804     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4805     set_cc_static(s);
4806     return DISAS_NEXT;
4807 }
4808
4810 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4811 {
4812     int d1 = get_field(s, d1);
4813     int d2 = get_field(s, d2);
4814     int b1 = get_field(s, b1);
4815     int b2 = get_field(s, b2);
4816     int l = get_field(s, l1);
4817     TCGv_i32 t32;
4818 
4819     o->addr1 = get_address(s, 0, b1, d1);
4820 
4821     /* If the addresses are identical, this is a store/memset of zero.  */
4822     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4823         o->in2 = tcg_constant_i64(0);
4824 
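             /* Emit the zeroing inline, largest chunks first: 8-byte stores,
                then at most one each of 4-, 2- and 1-byte stores. */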
4825         l++;
4826         while (l >= 8) {
4827             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4828             l -= 8;
4829             if (l > 0) {
4830                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4831             }
4832         }
4833         if (l >= 4) {
4834             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4835             l -= 4;
4836             if (l > 0) {
4837                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4838             }
4839         }
4840         if (l >= 2) {
4841             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4842             l -= 2;
4843             if (l > 0) {
4844                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4845             }
4846         }
4847         if (l) {
4848             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4849         }
4850         gen_op_movi_cc(s, 0);
4851         return DISAS_NEXT;
4852     }
4853 
4854     /* But in general we'll defer to a helper.  */
4855     o->in2 = get_address(s, 0, b2, d2);
4856     t32 = tcg_constant_i32(l);
4857     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4858     set_cc_static(s);
4859     return DISAS_NEXT;
4860 }
4861 
4862 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4863 {
4864     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4865     return DISAS_NEXT;
4866 }
4867 
4868 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4869 {
4870     int shift = s->insn->data & 0xff;
4871     int size = s->insn->data >> 8;
4872     uint64_t mask = ((1ull << size) - 1) << shift;
4873     TCGv_i64 t = tcg_temp_new_i64();
4874 
4875     tcg_gen_shli_i64(t, o->in2, shift);
4876     tcg_gen_xor_i64(o->out, o->in1, t);
4877 
4878     /* Produce the CC from only the bits manipulated.  */
4879     tcg_gen_andi_i64(cc_dst, o->out, mask);
4880     set_cc_nz_u64(s, cc_dst);
4881     return DISAS_NEXT;
4882 }
4883 
4884 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4885 {
4886     o->in1 = tcg_temp_new_i64();
4887 
4888     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4889         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4890     } else {
4891         /* Perform the atomic operation in memory. */
4892         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4893                                      s->insn->data);
4894     }
4895 
4896     /* Recompute for the atomic case as well; the result is needed for the CC. */
4897     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4898 
4899     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4900         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4901     }
4902     return DISAS_NEXT;
4903 }
4904 
4905 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4906 {
4907     o->out = tcg_constant_i64(0);
4908     return DISAS_NEXT;
4909 }
4910 
4911 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4912 {
4913     o->out = tcg_constant_i64(0);
4914     o->out2 = o->out;
4915     return DISAS_NEXT;
4916 }
4917 
4918 #ifndef CONFIG_USER_ONLY
4919 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4920 {
4921     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4922 
4923     gen_helper_clp(cpu_env, r2);
4924     set_cc_static(s);
4925     return DISAS_NEXT;
4926 }
4927 
4928 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4929 {
4930     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4931     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4932 
4933     gen_helper_pcilg(cpu_env, r1, r2);
4934     set_cc_static(s);
4935     return DISAS_NEXT;
4936 }
4937 
4938 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4939 {
4940     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4941     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4942 
4943     gen_helper_pcistg(cpu_env, r1, r2);
4944     set_cc_static(s);
4945     return DISAS_NEXT;
4946 }
4947 
4948 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4949 {
4950     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4951     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4952 
4953     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4954     set_cc_static(s);
4955     return DISAS_NEXT;
4956 }
4957 
4958 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4959 {
4960     gen_helper_sic(cpu_env, o->in1, o->in2);
4961     return DISAS_NEXT;
4962 }
4963 
4964 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4965 {
4966     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4967     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4968 
4969     gen_helper_rpcit(cpu_env, r1, r2);
4970     set_cc_static(s);
4971     return DISAS_NEXT;
4972 }
4973 
4974 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4975 {
4976     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4977     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4978     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4979 
4980     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4981     set_cc_static(s);
4982     return DISAS_NEXT;
4983 }
4984 
4985 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4986 {
4987     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4988     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4989 
4990     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4991     set_cc_static(s);
4992     return DISAS_NEXT;
4993 }
4994 #endif
4995 
4996 #include "translate_vx.c.inc"
4997 
4998 /* ====================================================================== */
4999 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5000    the original inputs), update the various cc data structures in order to
5001    be able to compute the new condition code.  */
5002 
5003 static void cout_abs32(DisasContext *s, DisasOps *o)
5004 {
5005     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5006 }
5007 
5008 static void cout_abs64(DisasContext *s, DisasOps *o)
5009 {
5010     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5011 }
5012 
5013 static void cout_adds32(DisasContext *s, DisasOps *o)
5014 {
5015     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5016 }
5017 
5018 static void cout_adds64(DisasContext *s, DisasOps *o)
5019 {
5020     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5021 }
5022 
5023 static void cout_addu32(DisasContext *s, DisasOps *o)
5024 {
5025     tcg_gen_shri_i64(cc_src, o->out, 32);
5026     tcg_gen_ext32u_i64(cc_dst, o->out);
5027     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5028 }
5029 
5030 static void cout_addu64(DisasContext *s, DisasOps *o)
5031 {
5032     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5033 }
5034 
5035 static void cout_cmps32(DisasContext *s, DisasOps *o)
5036 {
5037     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5038 }
5039 
5040 static void cout_cmps64(DisasContext *s, DisasOps *o)
5041 {
5042     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5043 }
5044 
5045 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5046 {
5047     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5048 }
5049 
5050 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5051 {
5052     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5053 }
5054 
5055 static void cout_f32(DisasContext *s, DisasOps *o)
5056 {
5057     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5058 }
5059 
5060 static void cout_f64(DisasContext *s, DisasOps *o)
5061 {
5062     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5063 }
5064 
5065 static void cout_f128(DisasContext *s, DisasOps *o)
5066 {
5067     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5068 }
5069 
5070 static void cout_nabs32(DisasContext *s, DisasOps *o)
5071 {
5072     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5073 }
5074 
5075 static void cout_nabs64(DisasContext *s, DisasOps *o)
5076 {
5077     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5078 }
5079 
5080 static void cout_neg32(DisasContext *s, DisasOps *o)
5081 {
5082     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5083 }
5084 
5085 static void cout_neg64(DisasContext *s, DisasOps *o)
5086 {
5087     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5088 }
5089 
5090 static void cout_nz32(DisasContext *s, DisasOps *o)
5091 {
5092     tcg_gen_ext32u_i64(cc_dst, o->out);
5093     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5094 }
5095 
5096 static void cout_nz64(DisasContext *s, DisasOps *o)
5097 {
5098     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5099 }
5100 
5101 static void cout_s32(DisasContext *s, DisasOps *o)
5102 {
5103     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5104 }
5105 
5106 static void cout_s64(DisasContext *s, DisasOps *o)
5107 {
5108     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5109 }
5110 
5111 static void cout_subs32(DisasContext *s, DisasOps *o)
5112 {
5113     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5114 }
5115 
5116 static void cout_subs64(DisasContext *s, DisasOps *o)
5117 {
5118     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5119 }
5120 
5121 static void cout_subu32(DisasContext *s, DisasOps *o)
5122 {
5123     tcg_gen_sari_i64(cc_src, o->out, 32);
5124     tcg_gen_ext32u_i64(cc_dst, o->out);
5125     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5126 }
5127 
5128 static void cout_subu64(DisasContext *s, DisasOps *o)
5129 {
5130     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5131 }
5132 
5133 static void cout_tm32(DisasContext *s, DisasOps *o)
5134 {
5135     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5136 }
5137 
5138 static void cout_tm64(DisasContext *s, DisasOps *o)
5139 {
5140     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5141 }
5142 
5143 static void cout_muls32(DisasContext *s, DisasOps *o)
5144 {
5145     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5146 }
5147 
5148 static void cout_muls64(DisasContext *s, DisasOps *o)
5149 {
5150     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5151     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5152 }
5153 
5154 /* ====================================================================== */
5155 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5156    with the TCG register to which we will write.  Used in combination with
5157    the "wout" generators, in some cases we need a new temporary, and in
5158    some cases we can write to a TCG global.  */
5159 
5160 static void prep_new(DisasContext *s, DisasOps *o)
5161 {
5162     o->out = tcg_temp_new_i64();
5163 }
5164 #define SPEC_prep_new 0
5165 
5166 static void prep_new_P(DisasContext *s, DisasOps *o)
5167 {
5168     o->out = tcg_temp_new_i64();
5169     o->out2 = tcg_temp_new_i64();
5170 }
5171 #define SPEC_prep_new_P 0
5172 
5173 static void prep_new_x(DisasContext *s, DisasOps *o)
5174 {
5175     o->out_128 = tcg_temp_new_i128();
5176 }
5177 #define SPEC_prep_new_x 0
5178 
5179 static void prep_r1(DisasContext *s, DisasOps *o)
5180 {
5181     o->out = regs[get_field(s, r1)];
5182 }
5183 #define SPEC_prep_r1 0
5184 
5185 static void prep_r1_P(DisasContext *s, DisasOps *o)
5186 {
5187     int r1 = get_field(s, r1);
5188     o->out = regs[r1];
5189     o->out2 = regs[r1 + 1];
5190 }
5191 #define SPEC_prep_r1_P SPEC_r1_even
5192 
5193 /* ====================================================================== */
5194 /* The "Write OUTput" generators.  These generally perform some non-trivial
5195    copy of data to TCG globals, or to main memory.  The trivial cases are
5196    generally handled by having a "prep" generator install the TCG global
5197    as the destination of the operation.  */
5198 
5199 static void wout_r1(DisasContext *s, DisasOps *o)
5200 {
5201     store_reg(get_field(s, r1), o->out);
5202 }
5203 #define SPEC_wout_r1 0
5204 
5205 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5206 {
5207     store_reg(get_field(s, r1), o->out2);
5208 }
5209 #define SPEC_wout_out2_r1 0
5210 
5211 static void wout_r1_8(DisasContext *s, DisasOps *o)
5212 {
5213     int r1 = get_field(s, r1);
5214     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5215 }
5216 #define SPEC_wout_r1_8 0
5217 
5218 static void wout_r1_16(DisasContext *s, DisasOps *o)
5219 {
5220     int r1 = get_field(s, r1);
5221     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5222 }
5223 #define SPEC_wout_r1_16 0
5224 
5225 static void wout_r1_32(DisasContext *s, DisasOps *o)
5226 {
5227     store_reg32_i64(get_field(s, r1), o->out);
5228 }
5229 #define SPEC_wout_r1_32 0
5230 
5231 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5232 {
5233     store_reg32h_i64(get_field(s, r1), o->out);
5234 }
5235 #define SPEC_wout_r1_32h 0
5236 
5237 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5238 {
5239     int r1 = get_field(s, r1);
5240     store_reg32_i64(r1, o->out);
5241     store_reg32_i64(r1 + 1, o->out2);
5242 }
5243 #define SPEC_wout_r1_P32 SPEC_r1_even
5244 
5245 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5246 {
5247     int r1 = get_field(s, r1);
5248     TCGv_i64 t = tcg_temp_new_i64();
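         /* Split the 64-bit result across the even/odd pair: low word to
            r1 + 1, high word to r1. */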
5249     store_reg32_i64(r1 + 1, o->out);
5250     tcg_gen_shri_i64(t, o->out, 32);
5251     store_reg32_i64(r1, t);
5252 }
5253 #define SPEC_wout_r1_D32 SPEC_r1_even
5254 
5255 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5256 {
5257     int r1 = get_field(s, r1);
5258     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5259 }
5260 #define SPEC_wout_r1_D64 SPEC_r1_even
5261 
5262 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5263 {
5264     int r3 = get_field(s, r3);
5265     store_reg32_i64(r3, o->out);
5266     store_reg32_i64(r3 + 1, o->out2);
5267 }
5268 #define SPEC_wout_r3_P32 SPEC_r3_even
5269 
5270 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5271 {
5272     int r3 = get_field(s, r3);
5273     store_reg(r3, o->out);
5274     store_reg(r3 + 1, o->out2);
5275 }
5276 #define SPEC_wout_r3_P64 SPEC_r3_even
5277 
5278 static void wout_e1(DisasContext *s, DisasOps *o)
5279 {
5280     store_freg32_i64(get_field(s, r1), o->out);
5281 }
5282 #define SPEC_wout_e1 0
5283 
5284 static void wout_f1(DisasContext *s, DisasOps *o)
5285 {
5286     store_freg(get_field(s, r1), o->out);
5287 }
5288 #define SPEC_wout_f1 0
5289 
5290 static void wout_x1(DisasContext *s, DisasOps *o)
5291 {
5292     int f1 = get_field(s, r1);
5293 
5294     /* Split out_128 into out+out2 for cout_f128. */
5295     tcg_debug_assert(o->out == NULL);
5296     o->out = tcg_temp_new_i64();
5297     o->out2 = tcg_temp_new_i64();
5298 
5299     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5300     store_freg(f1, o->out);
5301     store_freg(f1 + 2, o->out2);
5302 }
5303 #define SPEC_wout_x1 SPEC_r1_f128
5304 
5305 static void wout_x1_P(DisasContext *s, DisasOps *o)
5306 {
5307     int f1 = get_field(s, r1);
5308     store_freg(f1, o->out);
5309     store_freg(f1 + 2, o->out2);
5310 }
5311 #define SPEC_wout_x1_P SPEC_r1_f128
5312 
5313 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5314 {
5315     if (get_field(s, r1) != get_field(s, r2)) {
5316         store_reg32_i64(get_field(s, r1), o->out);
5317     }
5318 }
5319 #define SPEC_wout_cond_r1r2_32 0
5320 
5321 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5322 {
5323     if (get_field(s, r1) != get_field(s, r2)) {
5324         store_freg32_i64(get_field(s, r1), o->out);
5325     }
5326 }
5327 #define SPEC_wout_cond_e1e2 0
5328 
5329 static void wout_m1_8(DisasContext *s, DisasOps *o)
5330 {
5331     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5332 }
5333 #define SPEC_wout_m1_8 0
5334 
5335 static void wout_m1_16(DisasContext *s, DisasOps *o)
5336 {
5337     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5338 }
5339 #define SPEC_wout_m1_16 0
5340 
5341 #ifndef CONFIG_USER_ONLY
5342 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5343 {
5344     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5345 }
5346 #define SPEC_wout_m1_16a 0
5347 #endif
5348 
5349 static void wout_m1_32(DisasContext *s, DisasOps *o)
5350 {
5351     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5352 }
5353 #define SPEC_wout_m1_32 0
5354 
5355 #ifndef CONFIG_USER_ONLY
5356 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5357 {
5358     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5359 }
5360 #define SPEC_wout_m1_32a 0
5361 #endif
5362 
5363 static void wout_m1_64(DisasContext *s, DisasOps *o)
5364 {
5365     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5366 }
5367 #define SPEC_wout_m1_64 0
5368 
5369 #ifndef CONFIG_USER_ONLY
5370 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5371 {
5372     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5373 }
5374 #define SPEC_wout_m1_64a 0
5375 #endif
5376 
5377 static void wout_m2_32(DisasContext *s, DisasOps *o)
5378 {
5379     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5380 }
5381 #define SPEC_wout_m2_32 0
5382 
5383 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5384 {
5385     store_reg(get_field(s, r1), o->in2);
5386 }
5387 #define SPEC_wout_in2_r1 0
5388 
5389 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5390 {
5391     store_reg32_i64(get_field(s, r1), o->in2);
5392 }
5393 #define SPEC_wout_in2_r1_32 0
5394 
5395 /* ====================================================================== */
5396 /* The "INput 1" generators.  These load the first operand to an insn.  */
5397 
5398 static void in1_r1(DisasContext *s, DisasOps *o)
5399 {
5400     o->in1 = load_reg(get_field(s, r1));
5401 }
5402 #define SPEC_in1_r1 0
5403 
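/*
 * The _o variants alias the architectural register TCGv directly rather
 * than loading a copy into a temporary, so a write through in1 would
 * modify the register itself.
 */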
5404 static void in1_r1_o(DisasContext *s, DisasOps *o)
5405 {
5406     o->in1 = regs[get_field(s, r1)];
5407 }
5408 #define SPEC_in1_r1_o 0
5409 
5410 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5411 {
5412     o->in1 = tcg_temp_new_i64();
5413     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5414 }
5415 #define SPEC_in1_r1_32s 0
5416 
5417 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5418 {
5419     o->in1 = tcg_temp_new_i64();
5420     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5421 }
5422 #define SPEC_in1_r1_32u 0
5423 
5424 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5425 {
5426     o->in1 = tcg_temp_new_i64();
5427     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5428 }
5429 #define SPEC_in1_r1_sr32 0
5430 
5431 static void in1_r1p1(DisasContext *s, DisasOps *o)
5432 {
5433     o->in1 = load_reg(get_field(s, r1) + 1);
5434 }
5435 #define SPEC_in1_r1p1 SPEC_r1_even
5436 
5437 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5438 {
5439     o->in1 = regs[get_field(s, r1) + 1];
5440 }
5441 #define SPEC_in1_r1p1_o SPEC_r1_even
5442 
5443 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5444 {
5445     o->in1 = tcg_temp_new_i64();
5446     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5447 }
5448 #define SPEC_in1_r1p1_32s SPEC_r1_even
5449 
5450 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5451 {
5452     o->in1 = tcg_temp_new_i64();
5453     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5454 }
5455 #define SPEC_in1_r1p1_32u SPEC_r1_even
5456 
5457 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5458 {
5459     int r1 = get_field(s, r1);
5460     o->in1 = tcg_temp_new_i64();
5461     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5462 }
5463 #define SPEC_in1_r1_D32 SPEC_r1_even
5464 
5465 static void in1_r2(DisasContext *s, DisasOps *o)
5466 {
5467     o->in1 = load_reg(get_field(s, r2));
5468 }
5469 #define SPEC_in1_r2 0
5470 
5471 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5472 {
5473     o->in1 = tcg_temp_new_i64();
5474     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5475 }
5476 #define SPEC_in1_r2_sr32 0
5477 
5478 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5479 {
5480     o->in1 = tcg_temp_new_i64();
5481     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5482 }
5483 #define SPEC_in1_r2_32u 0
5484 
5485 static void in1_r3(DisasContext *s, DisasOps *o)
5486 {
5487     o->in1 = load_reg(get_field(s, r3));
5488 }
5489 #define SPEC_in1_r3 0
5490 
5491 static void in1_r3_o(DisasContext *s, DisasOps *o)
5492 {
5493     o->in1 = regs[get_field(s, r3)];
5494 }
5495 #define SPEC_in1_r3_o 0
5496 
5497 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5498 {
5499     o->in1 = tcg_temp_new_i64();
5500     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5501 }
5502 #define SPEC_in1_r3_32s 0
5503 
5504 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5505 {
5506     o->in1 = tcg_temp_new_i64();
5507     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5508 }
5509 #define SPEC_in1_r3_32u 0
5510 
5511 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5512 {
5513     int r3 = get_field(s, r3);
5514     o->in1 = tcg_temp_new_i64();
5515     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5516 }
5517 #define SPEC_in1_r3_D32 SPEC_r3_even
5518 
5519 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5520 {
5521     o->in1 = tcg_temp_new_i64();
5522     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5523 }
5524 #define SPEC_in1_r3_sr32 0
5525 
5526 static void in1_e1(DisasContext *s, DisasOps *o)
5527 {
5528     o->in1 = load_freg32_i64(get_field(s, r1));
5529 }
5530 #define SPEC_in1_e1 0
5531 
5532 static void in1_f1(DisasContext *s, DisasOps *o)
5533 {
5534     o->in1 = load_freg(get_field(s, r1));
5535 }
5536 #define SPEC_in1_f1 0
5537 
5538 static void in1_x1(DisasContext *s, DisasOps *o)
5539 {
5540     o->in1_128 = load_freg_128(get_field(s, r1));
5541 }
5542 #define SPEC_in1_x1 SPEC_r1_f128
5543 
5544 /* Load the high double word of an extended (128-bit) format FP number */
5545 static void in1_x2h(DisasContext *s, DisasOps *o)
5546 {
5547     o->in1 = load_freg(get_field(s, r2));
5548 }
5549 #define SPEC_in1_x2h SPEC_r2_f128
5550 
5551 static void in1_f3(DisasContext *s, DisasOps *o)
5552 {
5553     o->in1 = load_freg(get_field(s, r3));
5554 }
5555 #define SPEC_in1_f3 0
5556 
5557 static void in1_la1(DisasContext *s, DisasOps *o)
5558 {
5559     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5560 }
5561 #define SPEC_in1_la1 0
5562 
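/* Formats without an x2 field have no index register; treat it as 0. */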
5563 static void in1_la2(DisasContext *s, DisasOps *o)
5564 {
5565     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5566     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5567 }
5568 #define SPEC_in1_la2 0
5569 
5570 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5571 {
5572     in1_la1(s, o);
5573     o->in1 = tcg_temp_new_i64();
5574     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5575 }
5576 #define SPEC_in1_m1_8u 0
5577 
5578 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5579 {
5580     in1_la1(s, o);
5581     o->in1 = tcg_temp_new_i64();
5582     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5583 }
5584 #define SPEC_in1_m1_16s 0
5585 
5586 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5587 {
5588     in1_la1(s, o);
5589     o->in1 = tcg_temp_new_i64();
5590     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5591 }
5592 #define SPEC_in1_m1_16u 0
5593 
5594 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5595 {
5596     in1_la1(s, o);
5597     o->in1 = tcg_temp_new_i64();
5598     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5599 }
5600 #define SPEC_in1_m1_32s 0
5601 
5602 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5603 {
5604     in1_la1(s, o);
5605     o->in1 = tcg_temp_new_i64();
5606     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5607 }
5608 #define SPEC_in1_m1_32u 0
5609 
5610 static void in1_m1_64(DisasContext *s, DisasOps *o)
5611 {
5612     in1_la1(s, o);
5613     o->in1 = tcg_temp_new_i64();
5614     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5615 }
5616 #define SPEC_in1_m1_64 0
5617 
5618 /* ====================================================================== */
5619 /* The "INput 2" generators.  These load the second operand to an insn.  */
5620 
5621 static void in2_r1_o(DisasContext *s, DisasOps *o)
5622 {
5623     o->in2 = regs[get_field(s, r1)];
5624 }
5625 #define SPEC_in2_r1_o 0
5626 
5627 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5628 {
5629     o->in2 = tcg_temp_new_i64();
5630     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5631 }
5632 #define SPEC_in2_r1_16u 0
5633 
5634 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5635 {
5636     o->in2 = tcg_temp_new_i64();
5637     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5638 }
5639 #define SPEC_in2_r1_32u 0
5640 
5641 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5642 {
5643     int r1 = get_field(s, r1);
5644     o->in2 = tcg_temp_new_i64();
5645     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5646 }
5647 #define SPEC_in2_r1_D32 SPEC_r1_even
5648 
5649 static void in2_r2(DisasContext *s, DisasOps *o)
5650 {
5651     o->in2 = load_reg(get_field(s, r2));
5652 }
5653 #define SPEC_in2_r2 0
5654 
5655 static void in2_r2_o(DisasContext *s, DisasOps *o)
5656 {
5657     o->in2 = regs[get_field(s, r2)];
5658 }
5659 #define SPEC_in2_r2_o 0
5660 
5661 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5662 {
5663     int r2 = get_field(s, r2);
5664     if (r2 != 0) {
5665         o->in2 = load_reg(r2);
5666     }
5667 }
5668 #define SPEC_in2_r2_nz 0
5669 
5670 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5671 {
5672     o->in2 = tcg_temp_new_i64();
5673     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5674 }
5675 #define SPEC_in2_r2_8s 0
5676 
5677 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5678 {
5679     o->in2 = tcg_temp_new_i64();
5680     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5681 }
5682 #define SPEC_in2_r2_8u 0
5683 
5684 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5685 {
5686     o->in2 = tcg_temp_new_i64();
5687     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5688 }
5689 #define SPEC_in2_r2_16s 0
5690 
5691 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5692 {
5693     o->in2 = tcg_temp_new_i64();
5694     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5695 }
5696 #define SPEC_in2_r2_16u 0
5697 
5698 static void in2_r3(DisasContext *s, DisasOps *o)
5699 {
5700     o->in2 = load_reg(get_field(s, r3));
5701 }
5702 #define SPEC_in2_r3 0
5703 
5704 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5705 {
5706     int r3 = get_field(s, r3);
5707     o->in2_128 = tcg_temp_new_i128();
5708     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5709 }
5710 #define SPEC_in2_r3_D64 SPEC_r3_even
5711 
5712 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5713 {
5714     o->in2 = tcg_temp_new_i64();
5715     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5716 }
5717 #define SPEC_in2_r3_sr32 0
5718 
5719 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5720 {
5721     o->in2 = tcg_temp_new_i64();
5722     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5723 }
5724 #define SPEC_in2_r3_32u 0
5725 
5726 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5727 {
5728     o->in2 = tcg_temp_new_i64();
5729     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5730 }
5731 #define SPEC_in2_r2_32s 0
5732 
5733 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5734 {
5735     o->in2 = tcg_temp_new_i64();
5736     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5737 }
5738 #define SPEC_in2_r2_32u 0
5739 
5740 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5741 {
5742     o->in2 = tcg_temp_new_i64();
5743     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5744 }
5745 #define SPEC_in2_r2_sr32 0
5746 
5747 static void in2_e2(DisasContext *s, DisasOps *o)
5748 {
5749     o->in2 = load_freg32_i64(get_field(s, r2));
5750 }
5751 #define SPEC_in2_e2 0
5752 
5753 static void in2_f2(DisasContext *s, DisasOps *o)
5754 {
5755     o->in2 = load_freg(get_field(s, r2));
5756 }
5757 #define SPEC_in2_f2 0
5758 
5759 static void in2_x2(DisasContext *s, DisasOps *o)
5760 {
5761     o->in2_128 = load_freg_128(get_field(s, r2));
5762 }
5763 #define SPEC_in2_x2 SPEC_r2_f128
5764 
5765 /* Load the low double word of an extended (128-bit) format FP number */
5766 static void in2_x2l(DisasContext *s, DisasOps *o)
5767 {
5768     o->in2 = load_freg(get_field(s, r2) + 2);
5769 }
5770 #define SPEC_in2_x2l SPEC_r2_f128
5771 
5772 static void in2_ra2(DisasContext *s, DisasOps *o)
5773 {
5774     int r2 = get_field(s, r2);
5775 
5776     /* Note: *don't* treat !r2 as 0, use the reg value. */
5777     o->in2 = tcg_temp_new_i64();
5778     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5779 }
5780 #define SPEC_in2_ra2 0
5781 
5782 static void in2_a2(DisasContext *s, DisasOps *o)
5783 {
5784     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5785     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5786 }
5787 #define SPEC_in2_a2 0
5788 
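/*
 * Compute the target of a relative-immediate operand.  Relative offsets
 * are counted in halfwords, hence the multiplication by 2.
 */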
5789 static TCGv gen_ri2(DisasContext *s)
5790 {
5791     TCGv ri2 = NULL;
5792     bool is_imm;
5793     int imm;
5794 
5795     disas_jdest(s, i2, is_imm, imm, ri2);
5796     if (is_imm) {
5797         ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
5798     }
5799 
5800     return ri2;
5801 }
5802 
5803 static void in2_ri2(DisasContext *s, DisasOps *o)
5804 {
5805     o->in2 = gen_ri2(s);
5806 }
5807 #define SPEC_in2_ri2 0
5808 
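/*
 * Shift counts are specified like an address, b2 + d2, of which only the
 * low 6 bits are significant; b2 == 0 means no base register, so the
 * count folds to the constant d2 & 0x3f.
 */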
5809 static void in2_sh(DisasContext *s, DisasOps *o)
5810 {
5811     int b2 = get_field(s, b2);
5812     int d2 = get_field(s, d2);
5813 
5814     if (b2 == 0) {
5815         o->in2 = tcg_constant_i64(d2 & 0x3f);
5816     } else {
5817         o->in2 = get_address(s, 0, b2, d2);
5818         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5819     }
5820 }
5821 #define SPEC_in2_sh 0
5822 
5823 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5824 {
5825     in2_a2(s, o);
5826     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5827 }
5828 #define SPEC_in2_m2_8u 0
5829 
5830 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5831 {
5832     in2_a2(s, o);
5833     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5834 }
5835 #define SPEC_in2_m2_16s 0
5836 
5837 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5838 {
5839     in2_a2(s, o);
5840     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5841 }
5842 #define SPEC_in2_m2_16u 0
5843 
5844 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5845 {
5846     in2_a2(s, o);
5847     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5848 }
5849 #define SPEC_in2_m2_32s 0
5850 
5851 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5852 {
5853     in2_a2(s, o);
5854     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5855 }
5856 #define SPEC_in2_m2_32u 0
5857 
5858 #ifndef CONFIG_USER_ONLY
5859 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5860 {
5861     in2_a2(s, o);
5862     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5863 }
5864 #define SPEC_in2_m2_32ua 0
5865 #endif
5866 
5867 static void in2_m2_64(DisasContext *s, DisasOps *o)
5868 {
5869     in2_a2(s, o);
5870     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5871 }
5872 #define SPEC_in2_m2_64 0
5873 
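/*
 * The 64w variant additionally wraps the loaded value according to the
 * current addressing mode (gen_addi_and_wrap_i64 with addend 0),
 * presumably because the loaded doubleword is itself used as an address.
 */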
5874 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5875 {
5876     in2_a2(s, o);
5877     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5878     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5879 }
5880 #define SPEC_in2_m2_64w 0
5881 
5882 #ifndef CONFIG_USER_ONLY
5883 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5884 {
5885     in2_a2(s, o);
5886     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5887 }
5888 #define SPEC_in2_m2_64a 0
5889 #endif
5890 
5891 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5892 {
5893     o->in2 = tcg_temp_new_i64();
5894     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5895 }
5896 #define SPEC_in2_mri2_16s 0
5897 
5898 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5899 {
5900     o->in2 = tcg_temp_new_i64();
5901     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5902 }
5903 #define SPEC_in2_mri2_16u 0
5904 
5905 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5906 {
5907     o->in2 = tcg_temp_new_i64();
5908     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5909                        MO_TESL | MO_ALIGN);
5910 }
5911 #define SPEC_in2_mri2_32s 0
5912 
5913 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5914 {
5915     o->in2 = tcg_temp_new_i64();
5916     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5917                        MO_TEUL | MO_ALIGN);
5918 }
5919 #define SPEC_in2_mri2_32u 0
5920 
5921 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5922 {
5923     o->in2 = tcg_temp_new_i64();
5924     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5925                         MO_TEUQ | MO_ALIGN);
5926 }
5927 #define SPEC_in2_mri2_64 0
5928 
5929 static void in2_i2(DisasContext *s, DisasOps *o)
5930 {
5931     o->in2 = tcg_constant_i64(get_field(s, i2));
5932 }
5933 #define SPEC_in2_i2 0
5934 
5935 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5936 {
5937     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5938 }
5939 #define SPEC_in2_i2_8u 0
5940 
5941 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5942 {
5943     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5944 }
5945 #define SPEC_in2_i2_16u 0
5946 
5947 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5948 {
5949     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5950 }
5951 #define SPEC_in2_i2_32u 0
5952 
5953 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5954 {
5955     uint64_t i2 = (uint16_t)get_field(s, i2);
5956     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5957 }
5958 #define SPEC_in2_i2_16u_shl 0
5959 
5960 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5961 {
5962     uint64_t i2 = (uint32_t)get_field(s, i2);
5963     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5964 }
5965 #define SPEC_in2_i2_32u_shl 0
5966 
5967 #ifndef CONFIG_USER_ONLY
5968 static void in2_insn(DisasContext *s, DisasOps *o)
5969 {
5970     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5971 }
5972 #define SPEC_in2_insn 0
5973 #endif
5974 
5975 /* ====================================================================== */
5976 
5977 /* Find opc within the table of insns.  This is formulated as a switch
5978    statement so that (1) we get compile-time notice of cut-paste errors
5979    for duplicated opcodes, and (2) the compiler generates the binary
5980    search tree, rather than us having to post-process the table.  */
5981 
5982 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5983     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5984 
5985 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5986     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5987 
5988 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5989     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5990 
5991 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5992 
5993 enum DisasInsnEnum {
5994 #include "insn-data.h.inc"
5995 };
5996 
5997 #undef E
5998 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
5999     .opc = OPC,                                                             \
6000     .flags = FL,                                                            \
6001     .fmt = FMT_##FT,                                                        \
6002     .fac = FAC_##FC,                                                        \
6003     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6004     .name = #NM,                                                            \
6005     .help_in1 = in1_##I1,                                                   \
6006     .help_in2 = in2_##I2,                                                   \
6007     .help_prep = prep_##P,                                                  \
6008     .help_wout = wout_##W,                                                  \
6009     .help_cout = cout_##CC,                                                 \
6010     .help_op = op_##OP,                                                     \
6011     .data = D                                                               \
6012  },
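/*
 * As a purely hypothetical illustration (not a row from insn-data.h.inc):
 * an entry such as
 *     C(0x1234, FOO, RR_a, Z, r1, r2, 0, r1, add, adds32)
 * first expands to the enum constant insn_FOO, and then, under the E
 * definition above, to an insn_info[] initializer wiring up
 * help_in1 = in1_r1, help_in2 = in2_r2, help_op = op_add, and so on.
 */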
6013 
6014 /* Allow 0 to be used for NULL in the table below.  */
6015 #define in1_0  NULL
6016 #define in2_0  NULL
6017 #define prep_0  NULL
6018 #define wout_0  NULL
6019 #define cout_0  NULL
6020 #define op_0  NULL
6021 
6022 #define SPEC_in1_0 0
6023 #define SPEC_in2_0 0
6024 #define SPEC_prep_0 0
6025 #define SPEC_wout_0 0
6026 
6027 /* Give smaller names to the various facilities.  */
6028 #define FAC_Z           S390_FEAT_ZARCH
6029 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6030 #define FAC_DFP         S390_FEAT_DFP
6031 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6032 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6033 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6034 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6035 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6036 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6037 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6038 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6039 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6040 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6041 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6042 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6043 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6044 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6045 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6046 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6047 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6048 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6049 #define FAC_SFLE        S390_FEAT_STFLE
6050 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6051 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6052 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6053 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6054 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6055 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6056 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6057 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6058 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6059 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6060 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6061 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6062 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6063 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6064 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6065 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6066 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6067 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6068 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6069 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6070 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6071 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6072 
6073 static const DisasInsn insn_info[] = {
6074 #include "insn-data.h.inc"
6075 };
6076 
6077 #undef E
6078 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6079     case OPC: return &insn_info[insn_ ## NM];
6080 
6081 static const DisasInsn *lookup_opc(uint16_t opc)
6082 {
6083     switch (opc) {
6084 #include "insn-data.h.inc"
6085     default:
6086         return NULL;
6087     }
6088 }
6089 
6090 #undef F
6091 #undef E
6092 #undef D
6093 #undef C
6094 
6095 /* Extract a field from the insn.  The INSN should be left-aligned in
6096    the uint64_t so that we can more easily utilize the big-bit-endian
6097    definitions we extract from the Principles of Operation.  */
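/*
 * For example, a 4-bit field beginning at instruction bit 8 is pulled out
 * of the left-aligned doubleword as r = (insn << 8) >> (64 - 4), leaving
 * bits 8..11 right-aligned in r.
 */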
6098 
6099 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6100 {
6101     uint32_t r, m;
6102 
6103     if (f->size == 0) {
6104         return;
6105     }
6106 
6107     /* Zero extract the field from the insn.  */
6108     r = (insn << f->beg) >> (64 - f->size);
6109 
6110     /* Sign-extend, or un-swap the field as necessary.  */
6111     switch (f->type) {
6112     case 0: /* unsigned */
6113         break;
6114     case 1: /* signed */
6115         assert(f->size <= 32);
6116         m = 1u << (f->size - 1);
6117         r = (r ^ m) - m;
6118         break;
6119     case 2: /* dl+dh split, signed 20 bit. */
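        /*
         * r arrives as (DL << 8) | DH; reassemble the 20-bit signed
         * displacement as (sign-extended DH << 12) | DL.
         */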
6120         r = ((int8_t)r << 12) | (r >> 8);
6121         break;
6122     case 3: /* MSB stored in RXB */
6123         g_assert(f->size == 4);
6124         switch (f->beg) {
6125         case 8:
6126             r |= extract64(insn, 63 - 36, 1) << 4;
6127             break;
6128         case 12:
6129             r |= extract64(insn, 63 - 37, 1) << 4;
6130             break;
6131         case 16:
6132             r |= extract64(insn, 63 - 38, 1) << 4;
6133             break;
6134         case 32:
6135             r |= extract64(insn, 63 - 39, 1) << 4;
6136             break;
6137         default:
6138             g_assert_not_reached();
6139         }
6140         break;
6141     default:
6142         abort();
6143     }
6144 
6145     /*
6146      * Validate that the "compressed" encoding we selected above is valid.
6147      * I.e. we haven't made two different original fields overlap.
6148      */
6149     assert(((o->presentC >> f->indexC) & 1) == 0);
6150     o->presentC |= 1 << f->indexC;
6151     o->presentO |= 1 << f->indexO;
6152 
6153     o->c[f->indexC] = r;
6154 }
6155 
6156 /* Look up the insn at the current PC, extracting the operands into O and
6157    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6158 
6159 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6160 {
6161     uint64_t insn, pc = s->base.pc_next;
6162     int op, op2, ilen;
6163     const DisasInsn *info;
6164 
6165     if (unlikely(s->ex_value)) {
6166         /* Drop the EX data now, so that it's clear on exception paths.  */
6167         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6168                        offsetof(CPUS390XState, ex_value));
6169 
6170         /* Extract the values saved by EXECUTE.  */
6171         insn = s->ex_value & 0xffffffffffff0000ull;
6172         ilen = s->ex_value & 0xf;
6173 
6174         /* Register insn bytes with translator so plugins work. */
6175         for (int i = 0; i < ilen; i++) {
6176             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6177             translator_fake_ldb(byte, pc + i);
6178         }
6179         op = insn >> 56;
6180     } else {
6181         insn = ld_code2(env, s, pc);
6182         op = (insn >> 8) & 0xff;
6183         ilen = get_ilen(op);
6184         switch (ilen) {
6185         case 2:
6186             insn = insn << 48;
6187             break;
6188         case 4:
6189             insn = ld_code4(env, s, pc) << 32;
6190             break;
6191         case 6:
6192             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6193             break;
6194         default:
6195             g_assert_not_reached();
6196         }
6197     }
6198     s->pc_tmp = s->base.pc_next + ilen;
6199     s->ilen = ilen;
6200 
6201     /* We can't actually determine the insn format until we've looked up
6202        the full insn opcode, which we can't do without locating the
6203        secondary opcode.  Assume by default that OP2 is at bit 40; for
6204        those smaller insns that don't actually have a secondary opcode
6205        this will correctly result in OP2 = 0. */
6206     switch (op) {
6207     case 0x01: /* E */
6208     case 0x80: /* S */
6209     case 0x82: /* S */
6210     case 0x93: /* S */
6211     case 0xb2: /* S, RRF, RRE, IE */
6212     case 0xb3: /* RRE, RRD, RRF */
6213     case 0xb9: /* RRE, RRF */
6214     case 0xe5: /* SSE, SIL */
6215         op2 = (insn << 8) >> 56;
6216         break;
6217     case 0xa5: /* RI */
6218     case 0xa7: /* RI */
6219     case 0xc0: /* RIL */
6220     case 0xc2: /* RIL */
6221     case 0xc4: /* RIL */
6222     case 0xc6: /* RIL */
6223     case 0xc8: /* SSF */
6224     case 0xcc: /* RIL */
6225         op2 = (insn << 12) >> 60;
6226         break;
6227     case 0xc5: /* MII */
6228     case 0xc7: /* SMI */
6229     case 0xd0 ... 0xdf: /* SS */
6230     case 0xe1: /* SS */
6231     case 0xe2: /* SS */
6232     case 0xe8: /* SS */
6233     case 0xe9: /* SS */
6234     case 0xea: /* SS */
6235     case 0xee ... 0xf3: /* SS */
6236     case 0xf8 ... 0xfd: /* SS */
6237         op2 = 0;
6238         break;
6239     default:
6240         op2 = (insn << 40) >> 56;
6241         break;
6242     }
6243 
6244     memset(&s->fields, 0, sizeof(s->fields));
6245     s->fields.raw_insn = insn;
6246     s->fields.op = op;
6247     s->fields.op2 = op2;
6248 
6249     /* Look up the instruction.  */
6250     info = lookup_opc(op << 8 | op2);
6251     s->insn = info;
6252 
6253     /* If we found it, extract the operands.  */
6254     if (info != NULL) {
6255         DisasFormat fmt = info->fmt;
6256         int i;
6257 
6258         for (i = 0; i < NUM_C_FIELD; ++i) {
6259             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6260         }
6261     }
6262     return info;
6263 }
6264 
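/*
 * Without the additional-floating-point facility only fprs 0, 2, 4 and 6
 * exist; any other register number is an AFP register.
 */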
6265 static bool is_afp_reg(int reg)
6266 {
6267     return reg % 2 || reg > 6;
6268 }
6269 
6270 static bool is_fp_pair(int reg)
6271 {
6272     /* Valid pairs are 0,1,4,5,8,9,12,13; excluding the others reduces to checking that bit 1 is clear. */
6273     return !(reg & 0x2);
6274 }
6275 
6276 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6277 {
6278     const DisasInsn *insn;
6279     DisasJumpType ret = DISAS_NEXT;
6280     DisasOps o = {};
6281     bool icount = false;
6282 
6283     /* Search for the insn in the table.  */
6284     insn = extract_insn(env, s);
6285 
6286     /* Update insn_start now that we know the ILEN.  */
6287     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6288 
6289     /* Not found means unimplemented/illegal opcode.  */
6290     if (insn == NULL) {
6291         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6292                       s->fields.op, s->fields.op2);
6293         gen_illegal_opcode(s);
6294         ret = DISAS_NORETURN;
6295         goto out;
6296     }
6297 
6298 #ifndef CONFIG_USER_ONLY
6299     if (s->base.tb->flags & FLAG_MASK_PER) {
6300         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6301         gen_helper_per_ifetch(cpu_env, addr);
6302     }
6303 #endif
6304 
6305     /* process flags */
6306     if (insn->flags) {
6307         /* privileged instruction */
6308         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6309             gen_program_exception(s, PGM_PRIVILEGED);
6310             ret = DISAS_NORETURN;
6311             goto out;
6312         }
6313 
6314         /* If AFP is not enabled, AFP instructions and registers are forbidden.  */
6315         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6316             uint8_t dxc = 0;
6317 
6318             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6319                 dxc = 1;
6320             }
6321             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6322                 dxc = 1;
6323             }
6324             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6325                 dxc = 1;
6326             }
6327             if (insn->flags & IF_BFP) {
6328                 dxc = 2;
6329             }
6330             if (insn->flags & IF_DFP) {
6331                 dxc = 3;
6332             }
6333             if (insn->flags & IF_VEC) {
6334                 dxc = 0xfe;
6335             }
6336             if (dxc) {
6337                 gen_data_exception(dxc);
6338                 ret = DISAS_NORETURN;
6339                 goto out;
6340             }
6341         }
6342 
6343         /* If the vector facility is not enabled, executing vector instructions is forbidden.  */
6344         if (insn->flags & IF_VEC) {
6345             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6346                 gen_data_exception(0xfe);
6347                 ret = DISAS_NORETURN;
6348                 goto out;
6349             }
6350         }
6351 
6352         /* Input/output insns are the special case for icount mode.  */
6353         if (unlikely(insn->flags & IF_IO)) {
6354             icount = translator_io_start(&s->base);
6355         }
6356     }
6357 
6358     /* Check for insn specification exceptions.  */
6359     if (insn->spec) {
6360         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6361             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6362             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6363             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6364             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6365             gen_program_exception(s, PGM_SPECIFICATION);
6366             ret = DISAS_NORETURN;
6367             goto out;
6368         }
6369     }
6370 
6371     /* Implement the instruction.  */
6372     if (insn->help_in1) {
6373         insn->help_in1(s, &o);
6374     }
6375     if (insn->help_in2) {
6376         insn->help_in2(s, &o);
6377     }
6378     if (insn->help_prep) {
6379         insn->help_prep(s, &o);
6380     }
6381     if (insn->help_op) {
6382         ret = insn->help_op(s, &o);
6383     }
6384     if (ret != DISAS_NORETURN) {
6385         if (insn->help_wout) {
6386             insn->help_wout(s, &o);
6387         }
6388         if (insn->help_cout) {
6389             insn->help_cout(s, &o);
6390         }
6391     }
6392 
6393     /* An I/O insn must be the last instruction in a TB when icount is enabled.  */
6394     if (unlikely(icount && ret == DISAS_NEXT)) {
6395         ret = DISAS_TOO_MANY;
6396     }
6397 
6398 #ifndef CONFIG_USER_ONLY
6399     if (s->base.tb->flags & FLAG_MASK_PER) {
6400         /* An exception might be triggered; save the PSW if not already done.  */
6401         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6402             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6403         }
6404 
6405         /* Call the helper to check for a possible PER exception.  */
6406         gen_helper_per_check_exception(cpu_env);
6407     }
6408 #endif
6409 
6410 out:
6411     /* Advance to the next instruction.  */
6412     s->base.pc_next = s->pc_tmp;
6413     return ret;
6414 }
6415 
6416 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6417 {
6418     DisasContext *dc = container_of(dcbase, DisasContext, base);
6419 
6420     /* 31-bit mode */
6421     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6422         dc->base.pc_first &= 0x7fffffff;
6423         dc->base.pc_next = dc->base.pc_first;
6424     }
6425 
6426     dc->cc_op = CC_OP_DYNAMIC;
6427     dc->ex_value = dc->base.tb->cs_base;
6428     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6429 }
6430 
6431 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6432 {
6433 }
6434 
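/*
 * The insn_start op records (pc, cc_op, ilen).  The ilen is not yet known
 * here, so 0 is emitted and patched in translate_one once the insn has
 * been decoded; these are the slots s390x_restore_state_to_opc reads back
 * as data[0..2].
 */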
6435 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6436 {
6437     DisasContext *dc = container_of(dcbase, DisasContext, base);
6438 
6439     /* Delay setting ilen until we've read the insn. */
6440     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6441     dc->insn_start = tcg_last_op();
6442 }
6443 
6444 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6445                                 uint64_t pc)
6446 {
6447     uint64_t insn = cpu_lduw_code(env, pc);
6448 
6449     return pc + get_ilen((insn >> 8) & 0xff);
6450 }
6451 
6452 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6453 {
6454     CPUS390XState *env = cs->env_ptr;
6455     DisasContext *dc = container_of(dcbase, DisasContext, base);
6456 
6457     dc->base.is_jmp = translate_one(env, dc);
6458     if (dc->base.is_jmp == DISAS_NEXT) {
6459         if (dc->ex_value ||
6460             !is_same_page(dcbase, dc->base.pc_next) ||
6461             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6462             dc->base.is_jmp = DISAS_TOO_MANY;
6463         }
6464     }
6465 }
6466 
6467 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6468 {
6469     DisasContext *dc = container_of(dcbase, DisasContext, base);
6470 
6471     switch (dc->base.is_jmp) {
6472     case DISAS_NORETURN:
6473         break;
6474     case DISAS_TOO_MANY:
6475         update_psw_addr(dc);
6476         /* FALLTHRU */
6477     case DISAS_PC_UPDATED:
6478         /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
6479            cc op type is in env */
6480         update_cc_op(dc);
6481         /* FALLTHRU */
6482     case DISAS_PC_CC_UPDATED:
6483         /* Exit the TB, either by raising a debug exception or by return.  */
6484         if (dc->exit_to_mainloop) {
6485             tcg_gen_exit_tb(NULL, 0);
6486         } else {
6487             tcg_gen_lookup_and_goto_ptr();
6488         }
6489         break;
6490     default:
6491         g_assert_not_reached();
6492     }
6493 }
6494 
6495 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6496                                CPUState *cs, FILE *logfile)
6497 {
6498     DisasContext *dc = container_of(dcbase, DisasContext, base);
6499 
6500     if (unlikely(dc->ex_value)) {
6501         /* ??? Unfortunately target_disas can't use host memory.  */
6502         fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6503     } else {
6504         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6505         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6506     }
6507 }
6508 
6509 static const TranslatorOps s390x_tr_ops = {
6510     .init_disas_context = s390x_tr_init_disas_context,
6511     .tb_start           = s390x_tr_tb_start,
6512     .insn_start         = s390x_tr_insn_start,
6513     .translate_insn     = s390x_tr_translate_insn,
6514     .tb_stop            = s390x_tr_tb_stop,
6515     .disas_log          = s390x_tr_disas_log,
6516 };
6517 
6518 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6519                            target_ulong pc, void *host_pc)
6520 {
6521     DisasContext dc;
6522 
6523     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6524 }
6525 
6526 void s390x_restore_state_to_opc(CPUState *cs,
6527                                 const TranslationBlock *tb,
6528                                 const uint64_t *data)
6529 {
6530     S390CPU *cpu = S390_CPU(cs);
6531     CPUS390XState *env = &cpu->env;
6532     int cc_op = data[1];
6533 
6534     env->psw.addr = data[0];
6535 
6536     /* Update the CC opcode if it is not already up-to-date.  */
6537     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6538         env->cc_op = cc_op;
6539     }
6540 
6541     /* Record ILEN.  */
6542     env->int_pgm_ilen = data[2];
6543 }
6544