/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Information that (almost) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
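
/*
 * Compact-slot example: an RX-a operand set such as L r1,d2(x2,b2)
 * lands in slots 0 (r1), 6 (x2), 1 (b2) and 5 (d2).  Sharing works
 * because mutually exclusive fields never appear in one format --
 * e.g. r2 and b2 both use slot 1 -- while the "O" bitmap still
 * records which of the two was actually decoded.
 */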

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
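
/*
 * E.g. a link taken at 0x1000 stores 0x80001000 in the low word of the
 * link register in 31-bit mode (address plus AMODE-31 bit), 0x00001000
 * in 24-bit mode, and the full 64-bit address in 64-bit mode; the high
 * word is preserved in the non-64-bit cases.
 */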

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the lowest 8 bytes and vregs[n][1] the highest 8 bytes
     * of the 16 byte vector, on both little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
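
/*
 * Worked example: element 1 of an MO_32 access on a little-endian host
 * yields offs = 1 * 4 = 4, then 4 ^ (8 - 4) = 0 -- big-endian word 1 is
 * the least significant half of vregs[n][0], which an LE host keeps at
 * byte offset 0.
 */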

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
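
/*
 * E.g. in 24-bit mode a base of 0x00fffffe plus a displacement of 4
 * wraps to 0x00000002: the masking mimics the hardware truncation to
 * the addressing width instead of carrying into bit 24.
 */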

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}
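
/*
 * The "> 3" test assumes CC_OP_CONST0..CC_OP_CONST3 are the first four
 * enumerators (values 0-3); those encode the CC value directly and
 * reference no data in cc_src/cc_dst/cc_vr.
 */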

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such a comparison, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
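
/*
 * Example lookup: mask 12 (8|4, branch on CC 0 or 1, i.e. EQ or LT)
 * selects TCG_COND_LE.  Each row appears twice because mask bit 1
 * (CC=3) is a don't-care for comparisons.
 */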

/* Table of mask values to comparison codes, given a logic op as input.
   For such an op, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }
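
/*
 * Example expansion: BXD(2) describes the classic base-index-displacement
 * operand of RX-style instructions -- b2 in instruction bits 16-19, x2 in
 * bits 12-15 and the unsigned 12-bit d2 in bits 20-31.
 */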

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into this structure, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
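
/*
 * The CC_OP_STATIC case relies on the architected CC encodings for
 * logical add/subtract, where CC values 2 and 3 mean "carry": shifting
 * the two-bit CC right by one (e.g. CC=2 -> 1) extracts the carry.
 */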

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
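
/*
 * Illustration (encoding assumed here, not taken from insn-data): for
 * NIHH, which ANDs the high halfword of r1, data would encode size 16
 * and shift 48, making mask = 0xffff000000000000ull above.
 */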

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
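
/*
 * The 24-bit-mode path above assembles the classic BAL link word in the
 * low half of the register: ILC in bits 32-33, CC in bits 34-35, the
 * PSW program mask in bits 36-39 and the return address in bits 40-63
 * (big-endian bit numbering).
 */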

static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
{
    save_link_info(s, o);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

/*
 * Disassemble the target of a branch. The results are returned in a form
 * suitable for passing into help_branch():
 *
 * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
 *   branches, whose DisasContext *S contains the relative immediate field RI,
 *   are considered fixed. All the other branches are considered computed.
 * - int IMM is the value of RI.
 * - TCGv_i64 CDEST is the address of the computed target.
 */
#define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
    if (have_field(s, ri)) {                                                   \
        if (unlikely(s->ex_value)) {                                           \
            cdest = tcg_temp_new_i64();                                        \
            tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
            tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
            is_imm = false;                                                    \
        } else {                                                               \
            is_imm = true;                                                     \
        }                                                                      \
    } else {                                                                   \
        is_imm = false;                                                        \
    }                                                                          \
    imm = is_imm ? get_field(s, ri) : 0;                                       \
} while (false)

static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    bool is_imm;
    int imm;

    pc_to_link_info(o->out, s, s->pc_tmp);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, 0xf);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s, m1);
    DisasCompare c;
    bool is_imm;
    int imm;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s, r2) && get_field(s, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jdest(s, i2, is_imm, imm, o->in2);
    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int imm = get_field(s, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_constant_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);

    return help_branch(s, &c, 1, imm, o->in2);
}

static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = TCG_COND_NE;
    c.is_64 = true;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_constant_i64(0);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    TCGv_i64 t;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);

    disas_jdest(s, i2, is_imm, imm, o->in2);
    return help_branch(s, &c, is_imm, imm, o->in2);
}

static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    int r3 = get_field(s, r3);
    DisasCompare c;
    bool is_imm;
    int imm;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

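    /*
     * If r1 aliases the limit register r3|1, the register update below
     * would clobber the comparison value, so snapshot it first.
     */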
1698     if (r1 == (r3 | 1)) {
1699         c.u.s64.b = load_reg(r3 | 1);
1700     } else {
1701         c.u.s64.b = regs[r3 | 1];
1702     }
1703 
1704     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1705     c.u.s64.a = regs[r1];
1706 
1707     disas_jdest(s, i2, is_imm, imm, o->in2);
1708     return help_branch(s, &c, is_imm, imm, o->in2);
1709 }
1710 
1711 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1712 {
1713     int imm, m3 = get_field(s, m3);
1714     bool is_imm;
1715     DisasCompare c;
1716 
1717     c.cond = ltgt_cond[m3];
1718     if (s->insn->data) {
1719         c.cond = tcg_unsigned_cond(c.cond);
1720     }
1721     c.is_64 = true;
1722     c.u.s64.a = o->in1;
1723     c.u.s64.b = o->in2;
1724 
1725     o->out = NULL;
1726     disas_jdest(s, i4, is_imm, imm, o->out);
1727     if (!is_imm && !o->out) {
1728         imm = 0;
1729         o->out = get_address(s, 0, get_field(s, b4),
1730                              get_field(s, d4));
1731     }
1732 
1733     return help_branch(s, &c, is_imm, imm, o->out);
1734 }
1735 
1736 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1737 {
1738     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1739     set_cc_static(s);
1740     return DISAS_NEXT;
1741 }
1742 
1743 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1744 {
1745     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1746     set_cc_static(s);
1747     return DISAS_NEXT;
1748 }
1749 
1750 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1751 {
1752     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1753     set_cc_static(s);
1754     return DISAS_NEXT;
1755 }
1756 
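/*
 * Validate the m3 (rounding mode) and m4 fields of a floating-point
 * instruction and pack them into one i32 for the helpers: m3 in bits 0-3,
 * m4 in bits 4-7.  Returns NULL after raising a specification exception
 * when the rounding mode is invalid.
 */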
1757 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1758                                    bool m4_with_fpe)
1759 {
1760     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1761     uint8_t m3 = get_field(s, m3);
1762     uint8_t m4 = get_field(s, m4);
1763 
1764     /* m3 field was introduced with FPE */
1765     if (!fpe && m3_with_fpe) {
1766         m3 = 0;
1767     }
1768     /* m4 field was introduced with FPE */
1769     if (!fpe && m4_with_fpe) {
1770         m4 = 0;
1771     }
1772 
1773     /* Check for valid rounding modes. Mode 3 was introduced with FPE. */
1774     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1775         gen_program_exception(s, PGM_SPECIFICATION);
1776         return NULL;
1777     }
1778 
1779     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1780 }
1781 
1782 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1783 {
1784     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1785 
1786     if (!m34) {
1787         return DISAS_NORETURN;
1788     }
1789     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1790     set_cc_static(s);
1791     return DISAS_NEXT;
1792 }
1793 
1794 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1795 {
1796     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1797 
1798     if (!m34) {
1799         return DISAS_NORETURN;
1800     }
1801     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1802     set_cc_static(s);
1803     return DISAS_NEXT;
1804 }
1805 
1806 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1807 {
1808     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1809 
1810     if (!m34) {
1811         return DISAS_NORETURN;
1812     }
1813     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1814     set_cc_static(s);
1815     return DISAS_NEXT;
1816 }
1817 
1818 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1819 {
1820     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1821 
1822     if (!m34) {
1823         return DISAS_NORETURN;
1824     }
1825     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1826     set_cc_static(s);
1827     return DISAS_NEXT;
1828 }
1829 
1830 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1831 {
1832     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1833 
1834     if (!m34) {
1835         return DISAS_NORETURN;
1836     }
1837     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1838     set_cc_static(s);
1839     return DISAS_NEXT;
1840 }
1841 
1842 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1843 {
1844     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1845 
1846     if (!m34) {
1847         return DISAS_NORETURN;
1848     }
1849     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1850     set_cc_static(s);
1851     return DISAS_NEXT;
1852 }
1853 
1854 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1855 {
1856     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1857 
1858     if (!m34) {
1859         return DISAS_NORETURN;
1860     }
1861     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1862     set_cc_static(s);
1863     return DISAS_NEXT;
1864 }
1865 
1866 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1867 {
1868     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1869 
1870     if (!m34) {
1871         return DISAS_NORETURN;
1872     }
1873     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1874     set_cc_static(s);
1875     return DISAS_NEXT;
1876 }
1877 
1878 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1879 {
1880     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1881 
1882     if (!m34) {
1883         return DISAS_NORETURN;
1884     }
1885     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1886     set_cc_static(s);
1887     return DISAS_NEXT;
1888 }
1889 
1890 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1891 {
1892     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1893 
1894     if (!m34) {
1895         return DISAS_NORETURN;
1896     }
1897     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1898     set_cc_static(s);
1899     return DISAS_NEXT;
1900 }
1901 
1902 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1903 {
1904     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1905 
1906     if (!m34) {
1907         return DISAS_NORETURN;
1908     }
1909     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1910     set_cc_static(s);
1911     return DISAS_NEXT;
1912 }
1913 
1914 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1915 {
1916     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1917 
1918     if (!m34) {
1919         return DISAS_NORETURN;
1920     }
1921     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1922     set_cc_static(s);
1923     return DISAS_NEXT;
1924 }
1925 
1926 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1927 {
1928     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1929 
1930     if (!m34) {
1931         return DISAS_NORETURN;
1932     }
1933     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1934     return DISAS_NEXT;
1935 }
1936 
1937 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1938 {
1939     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1940 
1941     if (!m34) {
1942         return DISAS_NORETURN;
1943     }
1944     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1945     return DISAS_NEXT;
1946 }
1947 
1948 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1949 {
1950     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1951 
1952     if (!m34) {
1953         return DISAS_NORETURN;
1954     }
1955     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1956     return DISAS_NEXT;
1957 }
1958 
1959 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1960 {
1961     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1962 
1963     if (!m34) {
1964         return DISAS_NORETURN;
1965     }
1966     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1967     return DISAS_NEXT;
1968 }
1969 
1970 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1971 {
1972     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1973 
1974     if (!m34) {
1975         return DISAS_NORETURN;
1976     }
1977     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1978     return DISAS_NEXT;
1979 }
1980 
1981 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1982 {
1983     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1984 
1985     if (!m34) {
1986         return DISAS_NORETURN;
1987     }
1988     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1989     return DISAS_NEXT;
1990 }
1991 
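/*
 * CHECKSUM: the helper returns the checksum and the number of bytes
 * processed as a 128-bit pair; unpack it and advance the r2/r2+1
 * address/length register pair by the processed length.
 */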
1992 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1993 {
1994     int r2 = get_field(s, r2);
1995     TCGv_i128 pair = tcg_temp_new_i128();
1996     TCGv_i64 len = tcg_temp_new_i64();
1997 
1998     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1999     set_cc_static(s);
2000     tcg_gen_extr_i128_i64(o->out, len, pair);
2001 
2002     tcg_gen_add_i64(regs[r2], regs[r2], len);
2003     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2004 
2005     return DISAS_NEXT;
2006 }
2007 
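/*
 * COMPARE LOGICAL: inline the comparison as two loads when the operand
 * length (l + 1) is a power of two, so that ctz32(l + 1) yields the
 * MO_8/16/32/64 size code directly.  All other lengths go through the
 * byte-wise helper.
 */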
2008 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2009 {
2010     int l = get_field(s, l1);
2011     TCGv_i32 vl;
2012     MemOp mop;
2013 
2014     switch (l + 1) {
2015     case 1:
2016     case 2:
2017     case 4:
2018     case 8:
2019         mop = ctz32(l + 1) | MO_TE;
2020         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2021         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2022         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2023         return DISAS_NEXT;
2024     default:
2025         vl = tcg_constant_i32(l);
2026         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2027         set_cc_static(s);
2028         return DISAS_NEXT;
2029     }
2030 }
2031 
2032 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2033 {
2034     int r1 = get_field(s, r1);
2035     int r2 = get_field(s, r2);
2036     TCGv_i32 t1, t2;
2037 
2038     /* r1 and r2 must be even.  */
2039     if (r1 & 1 || r2 & 1) {
2040         gen_program_exception(s, PGM_SPECIFICATION);
2041         return DISAS_NORETURN;
2042     }
2043 
2044     t1 = tcg_constant_i32(r1);
2045     t2 = tcg_constant_i32(r2);
2046     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2047     set_cc_static(s);
2048     return DISAS_NEXT;
2049 }
2050 
2051 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2052 {
2053     int r1 = get_field(s, r1);
2054     int r3 = get_field(s, r3);
2055     TCGv_i32 t1, t3;
2056 
2057     /* r1 and r3 must be even.  */
2058     if (r1 & 1 || r3 & 1) {
2059         gen_program_exception(s, PGM_SPECIFICATION);
2060         return DISAS_NORETURN;
2061     }
2062 
2063     t1 = tcg_constant_i32(r1);
2064     t3 = tcg_constant_i32(r3);
2065     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2066     set_cc_static(s);
2067     return DISAS_NEXT;
2068 }
2069 
2070 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2071 {
2072     int r1 = get_field(s, r1);
2073     int r3 = get_field(s, r3);
2074     TCGv_i32 t1, t3;
2075 
2076     /* r1 and r3 must be even.  */
2077     if (r1 & 1 || r3 & 1) {
2078         gen_program_exception(s, PGM_SPECIFICATION);
2079         return DISAS_NORETURN;
2080     }
2081 
2082     t1 = tcg_constant_i32(r1);
2083     t3 = tcg_constant_i32(r3);
2084     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2085     set_cc_static(s);
2086     return DISAS_NEXT;
2087 }
2088 
2089 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2090 {
2091     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2092     TCGv_i32 t1 = tcg_temp_new_i32();
2093 
2094     tcg_gen_extrl_i64_i32(t1, o->in1);
2095     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2096     set_cc_static(s);
2097     return DISAS_NEXT;
2098 }
2099 
2100 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2101 {
2102     TCGv_i128 pair = tcg_temp_new_i128();
2103 
2104     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2105     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2106 
2107     set_cc_static(s);
2108     return DISAS_NEXT;
2109 }
2110 
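/* COPY SIGN: combine the sign bit of in1 with the magnitude of in2.  */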
2111 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2112 {
2113     TCGv_i64 t = tcg_temp_new_i64();
2114     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2115     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2116     tcg_gen_or_i64(o->out, o->out, t);
2117     return DISAS_NEXT;
2118 }
2119 
2120 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2121 {
2122     int d2 = get_field(s, d2);
2123     int b2 = get_field(s, b2);
2124     TCGv_i64 addr, cc;
2125 
2126     /* Note that in1 = R3 (new value) and
2127        in2 = (zero-extended) R1 (expected value).  */
2128 
2129     addr = get_address(s, 0, b2, d2);
2130     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2131                                get_mem_index(s), s->insn->data | MO_ALIGN);
2132 
2133     /* Are the memory and expected values (un)equal?  Note that this setcond
2134        produces the output CC value, thus the NE sense of the test.  */
2135     cc = tcg_temp_new_i64();
2136     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2137     tcg_gen_extrl_i64_i32(cc_op, cc);
2138     set_cc_static(s);
2139 
2140     return DISAS_NEXT;
2141 }
2142 
2143 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2144 {
2145     int r1 = get_field(s, r1);
2146 
2147     o->out_128 = tcg_temp_new_i128();
2148     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2149 
2150     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2151     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2152                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2153 
2154     /*
2155      * Extract result into cc_dst:cc_src, compare vs the expected value
2156      * in the as yet unmodified input registers, then update CC_OP.
2157      */
2158     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2159     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2160     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2161     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2162     set_cc_nz_u64(s, cc_dst);
2163 
2164     return DISAS_NEXT;
2165 }
2166 
2167 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2168 {
2169     int r3 = get_field(s, r3);
2170     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2171 
2172     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2173         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2174     } else {
2175         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2176     }
2177 
2178     set_cc_static(s);
2179     return DISAS_NEXT;
2180 }
2181 
2182 #ifndef CONFIG_USER_ONLY
2183 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2184 {
2185     MemOp mop = s->insn->data;
2186     TCGv_i64 addr, old, cc;
2187     TCGLabel *lab = gen_new_label();
2188 
2189     /* Note that in1 = R1 (zero-extended expected value),
2190        out = R1 (original reg), out2 = R1+1 (new value).  */
2191 
2192     addr = tcg_temp_new_i64();
2193     old = tcg_temp_new_i64();
2194     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2195     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2196                                get_mem_index(s), mop | MO_ALIGN);
2197 
2198     /* Are the memory and expected values (un)equal?  */
2199     cc = tcg_temp_new_i64();
2200     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2201     tcg_gen_extrl_i64_i32(cc_op, cc);
2202 
2203     /* Write back the output now, before the following branch,
2204        so that we don't need local temps.  */
2205     if ((mop & MO_SIZE) == MO_32) {
2206         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2207     } else {
2208         tcg_gen_mov_i64(o->out, old);
2209     }
2210 
2211     /* If the comparison was equal, and the LSB of R2 was set,
2212        then we need to flush the TLB (for all cpus).  */
2213     tcg_gen_xori_i64(cc, cc, 1);
2214     tcg_gen_and_i64(cc, cc, o->in2);
2215     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2216 
2217     gen_helper_purge(cpu_env);
2218     gen_set_label(lab);
2219 
2220     return DISAS_NEXT;
2221 }
2222 #endif
2223 
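/*
 * CONVERT TO DECIMAL: convert the 32-bit binary value from r1 to packed
 * decimal and store the 8-byte result at the second-operand address.
 */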
2224 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2225 {
2226     TCGv_i64 t1 = tcg_temp_new_i64();
2227     TCGv_i32 t2 = tcg_temp_new_i32();
2228     tcg_gen_extrl_i64_i32(t2, o->in1);
2229     gen_helper_cvd(t1, t2);
2230     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2231     return DISAS_NEXT;
2232 }
2233 
2234 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2235 {
2236     int m3 = get_field(s, m3);
2237     TCGLabel *lab = gen_new_label();
2238     TCGCond c;
2239 
2240     c = tcg_invert_cond(ltgt_cond[m3]);
2241     if (s->insn->data) {
2242         c = tcg_unsigned_cond(c);
2243     }
2244     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2245 
2246     /* Trap.  */
2247     gen_trap(s);
2248 
2249     gen_set_label(lab);
2250     return DISAS_NEXT;
2251 }
2252 
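/*
 * CONVERT UTF: insn->data encodes the source and destination formats as
 * two decimal digits (1 = UTF-8, 2 = UTF-16, 4 = UTF-32), e.g. 12 for
 * CU12, CONVERT UTF-8 TO UTF-16.
 */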
2253 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2254 {
2255     int m3 = get_field(s, m3);
2256     int r1 = get_field(s, r1);
2257     int r2 = get_field(s, r2);
2258     TCGv_i32 tr1, tr2, chk;
2259 
2260     /* R1 and R2 must both be even.  */
2261     if ((r1 | r2) & 1) {
2262         gen_program_exception(s, PGM_SPECIFICATION);
2263         return DISAS_NORETURN;
2264     }
2265     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2266         m3 = 0;
2267     }
2268 
2269     tr1 = tcg_constant_i32(r1);
2270     tr2 = tcg_constant_i32(r2);
2271     chk = tcg_constant_i32(m3);
2272 
2273     switch (s->insn->data) {
2274     case 12:
2275         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2276         break;
2277     case 14:
2278         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2279         break;
2280     case 21:
2281         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2282         break;
2283     case 24:
2284         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2285         break;
2286     case 41:
2287         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2288         break;
2289     case 42:
2290         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2291         break;
2292     default:
2293         g_assert_not_reached();
2294     }
2295 
2296     set_cc_static(s);
2297     return DISAS_NEXT;
2298 }
2299 
2300 #ifndef CONFIG_USER_ONLY
2301 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2302 {
2303     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2304     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2305     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2306 
2307     gen_helper_diag(cpu_env, r1, r3, func_code);
2308     return DISAS_NEXT;
2309 }
2310 #endif
2311 
2312 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2313 {
2314     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2315     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2316     return DISAS_NEXT;
2317 }
2318 
2319 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2320 {
2321     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2322     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2323     return DISAS_NEXT;
2324 }
2325 
2326 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2327 {
2328     TCGv_i128 t = tcg_temp_new_i128();
2329 
2330     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2331     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2332     return DISAS_NEXT;
2333 }
2334 
2335 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2336 {
2337     TCGv_i128 t = tcg_temp_new_i128();
2338 
2339     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2340     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2341     return DISAS_NEXT;
2342 }
2343 
2344 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2345 {
2346     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2347     return DISAS_NEXT;
2348 }
2349 
2350 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2351 {
2352     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2353     return DISAS_NEXT;
2354 }
2355 
2356 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2357 {
2358     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2359     return DISAS_NEXT;
2360 }
2361 
2362 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2363 {
2364     int r2 = get_field(s, r2);
2365     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2366     return DISAS_NEXT;
2367 }
2368 
2369 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2370 {
2371     /* No cache information provided.  */
2372     tcg_gen_movi_i64(o->out, -1);
2373     return DISAS_NEXT;
2374 }
2375 
2376 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2377 {
2378     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2379     return DISAS_NEXT;
2380 }
2381 
2382 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2383 {
2384     int r1 = get_field(s, r1);
2385     int r2 = get_field(s, r2);
2386     TCGv_i64 t = tcg_temp_new_i64();
2387 
2388     /* Note the "subsequently" in the PoO, which implies a defined result
2389        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2390     tcg_gen_shri_i64(t, psw_mask, 32);
2391     store_reg32_i64(r1, t);
2392     if (r2 != 0) {
2393         store_reg32_i64(r2, psw_mask);
2394     }
2395     return DISAS_NEXT;
2396 }
2397 
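/*
 * EXECUTE: hand off to the helper, which fetches the target instruction,
 * applies the r1 modification when r1 != 0, and records the result in
 * ex_value so that it is translated next.
 */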
2398 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2399 {
2400     int r1 = get_field(s, r1);
2401     TCGv_i32 ilen;
2402     TCGv_i64 v1;
2403 
2404     /* Nested EXECUTE is not allowed.  */
2405     if (unlikely(s->ex_value)) {
2406         gen_program_exception(s, PGM_EXECUTE);
2407         return DISAS_NORETURN;
2408     }
2409 
2410     update_psw_addr(s);
2411     update_cc_op(s);
2412 
2413     if (r1 == 0) {
2414         v1 = tcg_constant_i64(0);
2415     } else {
2416         v1 = regs[r1];
2417     }
2418 
2419     ilen = tcg_constant_i32(s->ilen);
2420     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2421 
2422     return DISAS_PC_CC_UPDATED;
2423 }
2424 
2425 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2426 {
2427     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2428 
2429     if (!m34) {
2430         return DISAS_NORETURN;
2431     }
2432     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2433     return DISAS_NEXT;
2434 }
2435 
2436 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2437 {
2438     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2439 
2440     if (!m34) {
2441         return DISAS_NORETURN;
2442     }
2443     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2444     return DISAS_NEXT;
2445 }
2446 
2447 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2448 {
2449     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2450 
2451     if (!m34) {
2452         return DISAS_NORETURN;
2453     }
2454     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2455     return DISAS_NEXT;
2456 }
2457 
2458 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2459 {
2460     /* We'll use the original input for cc computation, since we get to
2461        compare that against 0, which ought to be better than comparing
2462        the real output against 64.  It also lets cc_dst be a convenient
2463        temporary during our computation.  */
2464     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2465 
2466     /* R1 = IN ? CLZ(IN) : 64.  */
2467     tcg_gen_clzi_i64(o->out, o->in2, 64);
2468 
2469     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2470        value by 64, which is undefined.  But since the shift is 64 iff the
2471        input is zero, we still get the correct result after and'ing.  */
2472     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2473     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2474     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2475     return DISAS_NEXT;
2476 }
2477 
2478 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2479 {
2480     int m3 = get_field(s, m3);
2481     int pos, len, base = s->insn->data;
2482     TCGv_i64 tmp = tcg_temp_new_i64();
2483     uint64_t ccm;
2484 
2485     switch (m3) {
2486     case 0xf:
2487         /* Effectively a 32-bit load.  */
2488         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2489         len = 32;
2490         goto one_insert;
2491 
2492     case 0xc:
2493     case 0x6:
2494     case 0x3:
2495         /* Effectively a 16-bit load.  */
2496         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2497         len = 16;
2498         goto one_insert;
2499 
2500     case 0x8:
2501     case 0x4:
2502     case 0x2:
2503     case 0x1:
2504         /* Effectively an 8-bit load.  */
2505         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2506         len = 8;
2507         goto one_insert;
2508 
2509     one_insert:
2510         pos = base + ctz32(m3) * 8;
2511         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2512         ccm = ((1ull << len) - 1) << pos;
2513         break;
2514 
2515     default:
2516         /* This is going to be a sequence of loads and inserts.  */
2517         pos = base + 32 - 8;
2518         ccm = 0;
2519         while (m3) {
2520             if (m3 & 0x8) {
2521                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2522                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2523                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2524                 ccm |= 0xffull << pos;
2525             }
2526             m3 = (m3 << 1) & 0xf;
2527             pos -= 8;
2528         }
2529         break;
2530     }
2531 
2532     tcg_gen_movi_i64(tmp, ccm);
2533     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2534     return DISAS_NEXT;
2535 }
2536 
2537 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2538 {
2539     int shift = s->insn->data & 0xff;
2540     int size = s->insn->data >> 8;
2541     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2542     return DISAS_NEXT;
2543 }
2544 
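/*
 * INSERT PROGRAM MASK: build the byte (CC << 4) | program mask and
 * deposit it into bits 24-31 of r1.  The program mask sits at bits
 * 40-43 of psw_mask.
 */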
2545 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
2546 {
2547     TCGv_i64 t1, t2;
2548 
2549     gen_op_calc_cc(s);
2550     t1 = tcg_temp_new_i64();
2551     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2552     t2 = tcg_temp_new_i64();
2553     tcg_gen_extu_i32_i64(t2, cc_op);
2554     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2555     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2556     return DISAS_NEXT;
2557 }
2558 
2559 #ifndef CONFIG_USER_ONLY
2560 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2561 {
2562     TCGv_i32 m4;
2563 
2564     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2565         m4 = tcg_constant_i32(get_field(s, m4));
2566     } else {
2567         m4 = tcg_constant_i32(0);
2568     }
2569     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2570     return DISAS_NEXT;
2571 }
2572 
2573 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2574 {
2575     TCGv_i32 m4;
2576 
2577     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2578         m4 = tcg_constant_i32(get_field(s, m4));
2579     } else {
2580         m4 = tcg_constant_i32(0);
2581     }
2582     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2583     return DISAS_NEXT;
2584 }
2585 
2586 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2587 {
2588     gen_helper_iske(o->out, cpu_env, o->in2);
2589     return DISAS_NEXT;
2590 }
2591 #endif
2592 
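/*
 * Common handler for the message-security-assist instructions.  The
 * cascading switch below validates the register constraints for each
 * function type, falling through so that stricter types also apply the
 * weaker checks.
 */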
2593 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2594 {
2595     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2596     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2597     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2598     TCGv_i32 t_r1, t_r2, t_r3, type;
2599 
2600     switch (s->insn->data) {
2601     case S390_FEAT_TYPE_KMA:
2602         if (r3 == r1 || r3 == r2) {
2603             gen_program_exception(s, PGM_SPECIFICATION);
2604             return DISAS_NORETURN;
2605         }
2606         /* FALL THROUGH */
2607     case S390_FEAT_TYPE_KMCTR:
2608         if (r3 & 1 || !r3) {
2609             gen_program_exception(s, PGM_SPECIFICATION);
2610             return DISAS_NORETURN;
2611         }
2612         /* FALL THROUGH */
2613     case S390_FEAT_TYPE_PPNO:
2614     case S390_FEAT_TYPE_KMF:
2615     case S390_FEAT_TYPE_KMC:
2616     case S390_FEAT_TYPE_KMO:
2617     case S390_FEAT_TYPE_KM:
2618         if (r1 & 1 || !r1) {
2619             gen_program_exception(s, PGM_SPECIFICATION);
2620             return DISAS_NORETURN;
2621         }
2622         /* FALL THROUGH */
2623     case S390_FEAT_TYPE_KMAC:
2624     case S390_FEAT_TYPE_KIMD:
2625     case S390_FEAT_TYPE_KLMD:
2626         if (r2 & 1 || !r2) {
2627             gen_program_exception(s, PGM_SPECIFICATION);
2628             return DISAS_NORETURN;
2629         }
2630         /* FALL THROUGH */
2631     case S390_FEAT_TYPE_PCKMO:
2632     case S390_FEAT_TYPE_PCC:
2633         break;
2634     default:
2635         g_assert_not_reached();
2636     }
2637 
2638     t_r1 = tcg_constant_i32(r1);
2639     t_r2 = tcg_constant_i32(r2);
2640     t_r3 = tcg_constant_i32(r3);
2641     type = tcg_constant_i32(s->insn->data);
2642     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2643     set_cc_static(s);
2644     return DISAS_NEXT;
2645 }
2646 
2647 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2648 {
2649     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2650     set_cc_static(s);
2651     return DISAS_NEXT;
2652 }
2653 
2654 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2655 {
2656     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2657     set_cc_static(s);
2658     return DISAS_NEXT;
2659 }
2660 
2661 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2662 {
2663     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2664     set_cc_static(s);
2665     return DISAS_NEXT;
2666 }
2667 
2668 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2669 {
2670     /* The real output is the original value in memory, which the
2671        atomic operation fetches into in2.  */
2672     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2673                                  s->insn->data | MO_ALIGN);
2674     /* However, we need to recompute the addition for setting CC.  */
2675     tcg_gen_add_i64(o->out, o->in1, o->in2);
2676     return DISAS_NEXT;
2677 }
2678 
2679 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2680 {
2681     /* The real output is the original value in memory, which the
2682        atomic operation fetches into in2.  */
2683     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2684                                  s->insn->data | MO_ALIGN);
2685     /* However, we need to recompute the operation for setting CC.  */
2686     tcg_gen_and_i64(o->out, o->in1, o->in2);
2687     return DISAS_NEXT;
2688 }
2689 
2690 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2691 {
2692     /* The real output is the original value in memory, which the
2693        atomic operation fetches into in2.  */
2694     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2695                                 s->insn->data | MO_ALIGN);
2696     /* However, we need to recompute the operation for setting CC.  */
2697     tcg_gen_or_i64(o->out, o->in1, o->in2);
2698     return DISAS_NEXT;
2699 }
2700 
2701 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2702 {
2703     /* The real output is the original value in memory, which the
2704        atomic operation fetches into in2.  */
2705     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2706                                  s->insn->data | MO_ALIGN);
2707     /* However, we need to recompute the operation for setting CC.  */
2708     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2709     return DISAS_NEXT;
2710 }
2711 
2712 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2713 {
2714     gen_helper_ldeb(o->out, cpu_env, o->in2);
2715     return DISAS_NEXT;
2716 }
2717 
2718 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2719 {
2720     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2721 
2722     if (!m34) {
2723         return DISAS_NORETURN;
2724     }
2725     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2726     return DISAS_NEXT;
2727 }
2728 
2729 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2730 {
2731     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2732 
2733     if (!m34) {
2734         return DISAS_NORETURN;
2735     }
2736     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2737     return DISAS_NEXT;
2738 }
2739 
2740 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2741 {
2742     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2743 
2744     if (!m34) {
2745         return DISAS_NORETURN;
2746     }
2747     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2748     return DISAS_NEXT;
2749 }
2750 
2751 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2752 {
2753     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2754     return DISAS_NEXT;
2755 }
2756 
2757 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2758 {
2759     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2760     return DISAS_NEXT;
2761 }
2762 
2763 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2764 {
2765     tcg_gen_shli_i64(o->out, o->in2, 32);
2766     return DISAS_NEXT;
2767 }
2768 
2769 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2770 {
2771     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2772     return DISAS_NEXT;
2773 }
2774 
2775 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2776 {
2777     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2778     return DISAS_NEXT;
2779 }
2780 
2781 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2782 {
2783     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2784     return DISAS_NEXT;
2785 }
2786 
2787 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2788 {
2789     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2790     return DISAS_NEXT;
2791 }
2792 
2793 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2794 {
2795     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2796     return DISAS_NEXT;
2797 }
2798 
2799 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2800 {
2801     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2802                        MO_TESL | s->insn->data);
2803     return DISAS_NEXT;
2804 }
2805 
2806 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2807 {
2808     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2809                        MO_TEUL | s->insn->data);
2810     return DISAS_NEXT;
2811 }
2812 
2813 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2814 {
2815     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2816                         MO_TEUQ | s->insn->data);
2817     return DISAS_NEXT;
2818 }
2819 
2820 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2821 {
2822     TCGLabel *lab = gen_new_label();
2823     store_reg32_i64(get_field(s, r1), o->in2);
2824     /* The value is stored even in case of trap. */
2825     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2826     gen_trap(s);
2827     gen_set_label(lab);
2828     return DISAS_NEXT;
2829 }
2830 
2831 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2832 {
2833     TCGLabel *lab = gen_new_label();
2834     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2835     /* The value is stored even in case of trap. */
2836     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2837     gen_trap(s);
2838     gen_set_label(lab);
2839     return DISAS_NEXT;
2840 }
2841 
2842 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2843 {
2844     TCGLabel *lab = gen_new_label();
2845     store_reg32h_i64(get_field(s, r1), o->in2);
2846     /* The value is stored even in case of trap. */
2847     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2848     gen_trap(s);
2849     gen_set_label(lab);
2850     return DISAS_NEXT;
2851 }
2852 
2853 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2854 {
2855     TCGLabel *lab = gen_new_label();
2856 
2857     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2858     /* The value is stored even in case of trap. */
2859     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2860     gen_trap(s);
2861     gen_set_label(lab);
2862     return DISAS_NEXT;
2863 }
2864 
2865 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2866 {
2867     TCGLabel *lab = gen_new_label();
2868     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2869     /* The value is stored even in case of trap. */
2870     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2871     gen_trap(s);
2872     gen_set_label(lab);
2873     return DISAS_NEXT;
2874 }
2875 
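/*
 * LOAD ON CONDITION / SELECT: evaluate the condition once, then use
 * movcond to pick between the two values without branching.
 */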
2876 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2877 {
2878     DisasCompare c;
2879 
2880     if (have_field(s, m3)) {
2881         /* LOAD * ON CONDITION */
2882         disas_jcc(s, &c, get_field(s, m3));
2883     } else {
2884         /* SELECT */
2885         disas_jcc(s, &c, get_field(s, m4));
2886     }
2887 
2888     if (c.is_64) {
2889         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2890                             o->in2, o->in1);
2891     } else {
2892         TCGv_i32 t32 = tcg_temp_new_i32();
2893         TCGv_i64 t, z;
2894 
2895         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2896 
2897         t = tcg_temp_new_i64();
2898         tcg_gen_extu_i32_i64(t, t32);
2899 
2900         z = tcg_constant_i64(0);
2901         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2902     }
2903 
2904     return DISAS_NEXT;
2905 }
2906 
2907 #ifndef CONFIG_USER_ONLY
2908 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2909 {
2910     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2911     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2912 
2913     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2914     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2915     s->exit_to_mainloop = true;
2916     return DISAS_TOO_MANY;
2917 }
2918 
2919 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2920 {
2921     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2922     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2923 
2924     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2925     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2926     s->exit_to_mainloop = true;
2927     return DISAS_TOO_MANY;
2928 }
2929 
2930 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2931 {
2932     gen_helper_lra(o->out, cpu_env, o->in2);
2933     set_cc_static(s);
2934     return DISAS_NEXT;
2935 }
2936 
2937 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2938 {
2939     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2940     return DISAS_NEXT;
2941 }
2942 
2943 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2944 {
2945     TCGv_i64 mask, addr;
2946 
2947     per_breaking_event(s);
2948 
2949     /*
2950      * Convert the short PSW into the normal PSW, similar to what
2951      * s390_cpu_load_normal() does.
2952      */
2953     mask = tcg_temp_new_i64();
2954     addr = tcg_temp_new_i64();
2955     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2956     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2957     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2958     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2959     gen_helper_load_psw(cpu_env, mask, addr);
2960     return DISAS_NORETURN;
2961 }
2962 
2963 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2964 {
2965     TCGv_i64 t1, t2;
2966 
2967     per_breaking_event(s);
2968 
2969     t1 = tcg_temp_new_i64();
2970     t2 = tcg_temp_new_i64();
2971     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2972                         MO_TEUQ | MO_ALIGN_8);
2973     tcg_gen_addi_i64(o->in2, o->in2, 8);
2974     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2975     gen_helper_load_psw(cpu_env, t1, t2);
2976     return DISAS_NORETURN;
2977 }
2978 #endif
2979 
2980 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2981 {
2982     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2983     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2984 
2985     gen_helper_lam(cpu_env, r1, o->in2, r3);
2986     return DISAS_NEXT;
2987 }
2988 
2989 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2990 {
2991     int r1 = get_field(s, r1);
2992     int r3 = get_field(s, r3);
2993     TCGv_i64 t1, t2;
2994 
2995     /* Only one register to read. */
2996     t1 = tcg_temp_new_i64();
2997     if (unlikely(r1 == r3)) {
2998         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
2999         store_reg32_i64(r1, t1);
3000         return DISAS_NEXT;
3001     }
3002 
3003     /* First load the values of the first and last registers to trigger
3004        possible page faults. */
3005     t2 = tcg_temp_new_i64();
3006     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3007     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3008     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3009     store_reg32_i64(r1, t1);
3010     store_reg32_i64(r3, t2);
3011 
3012     /* Only two registers to read. */
3013     if (((r1 + 1) & 15) == r3) {
3014         return DISAS_NEXT;
3015     }
3016 
3017     /* Then load the remaining registers. A page fault can't occur. */
3018     r3 = (r3 - 1) & 15;
3019     tcg_gen_movi_i64(t2, 4);
3020     while (r1 != r3) {
3021         r1 = (r1 + 1) & 15;
3022         tcg_gen_add_i64(o->in2, o->in2, t2);
3023         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3024         store_reg32_i64(r1, t1);
3025     }
3026     return DISAS_NEXT;
3027 }
3028 
3029 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3030 {
3031     int r1 = get_field(s, r1);
3032     int r3 = get_field(s, r3);
3033     TCGv_i64 t1, t2;
3034 
3035     /* Only one register to read. */
3036     t1 = tcg_temp_new_i64();
3037     if (unlikely(r1 == r3)) {
3038         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3039         store_reg32h_i64(r1, t1);
3040         return DISAS_NEXT;
3041     }
3042 
3043     /* First load the values of the first and last registers to trigger
3044        possible page faults. */
3045     t2 = tcg_temp_new_i64();
3046     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3047     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3048     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3049     store_reg32h_i64(r1, t1);
3050     store_reg32h_i64(r3, t2);
3051 
3052     /* Only two registers to read. */
3053     if (((r1 + 1) & 15) == r3) {
3054         return DISAS_NEXT;
3055     }
3056 
3057     /* Then load the remaining registers. A page fault can't occur. */
3058     r3 = (r3 - 1) & 15;
3059     tcg_gen_movi_i64(t2, 4);
3060     while (r1 != r3) {
3061         r1 = (r1 + 1) & 15;
3062         tcg_gen_add_i64(o->in2, o->in2, t2);
3063         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3064         store_reg32h_i64(r1, t1);
3065     }
3066     return DISAS_NEXT;
3067 }
3068 
3069 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3070 {
3071     int r1 = get_field(s, r1);
3072     int r3 = get_field(s, r3);
3073     TCGv_i64 t1, t2;
3074 
3075     /* Only one register to read. */
3076     if (unlikely(r1 == r3)) {
3077         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3078         return DISAS_NEXT;
3079     }
3080 
3081     /* First load the values of the first and last registers to trigger
3082        possible page faults. */
3083     t1 = tcg_temp_new_i64();
3084     t2 = tcg_temp_new_i64();
3085     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3086     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3087     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3088     tcg_gen_mov_i64(regs[r1], t1);
3089 
3090     /* Only two registers to read. */
3091     if (((r1 + 1) & 15) == r3) {
3092         return DISAS_NEXT;
3093     }
3094 
3095     /* Then load the remaining registers. A page fault can't occur. */
3096     r3 = (r3 - 1) & 15;
3097     tcg_gen_movi_i64(t1, 8);
3098     while (r1 != r3) {
3099         r1 = (r1 + 1) & 15;
3100         tcg_gen_add_i64(o->in2, o->in2, t1);
3101         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3102     }
3103     return DISAS_NEXT;
3104 }
3105 
3106 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3107 {
3108     TCGv_i64 a1, a2;
3109     MemOp mop = s->insn->data;
3110 
3111     /* In a parallel context, stop the world and single step.  */
3112     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3113         update_psw_addr(s);
3114         update_cc_op(s);
3115         gen_exception(EXCP_ATOMIC);
3116         return DISAS_NORETURN;
3117     }
3118 
3119     /* In a serial context, perform the two loads ... */
3120     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3121     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3122     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3123     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3124 
3125     /* ... and indicate that we performed them while interlocked.  */
3126     gen_op_movi_cc(s, 0);
3127     return DISAS_NEXT;
3128 }
3129 
3130 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3131 {
3132     o->out_128 = tcg_temp_new_i128();
3133     tcg_gen_qemu_ld_i128(o->out_128, o->in2, get_mem_index(s),
3134                          MO_TE | MO_128 | MO_ALIGN);
3135     return DISAS_NEXT;
3136 }
3137 
3138 #ifndef CONFIG_USER_ONLY
3139 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3140 {
3141     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3142     return DISAS_NEXT;
3143 }
3144 #endif
3145 
3146 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3147 {
3148     tcg_gen_andi_i64(o->out, o->in2, -256);
3149     return DISAS_NEXT;
3150 }
3151 
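/*
 * LOAD COUNT TO BLOCK BOUNDARY: r1 = min(16, bytes remaining before the
 * next block boundary).  Since block_size is a power of two,
 * -(addr | -block_size) is block_size - (addr % block_size).
 */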
3152 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3153 {
3154     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3155 
3156     if (get_field(s, m3) > 6) {
3157         gen_program_exception(s, PGM_SPECIFICATION);
3158         return DISAS_NORETURN;
3159     }
3160 
3161     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3162     tcg_gen_neg_i64(o->addr1, o->addr1);
3163     tcg_gen_movi_i64(o->out, 16);
3164     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3165     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3166     return DISAS_NEXT;
3167 }
3168 
3169 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3170 {
3171     const uint16_t monitor_class = get_field(s, i2);
3172 
3173     if (monitor_class & 0xff00) {
3174         gen_program_exception(s, PGM_SPECIFICATION);
3175         return DISAS_NORETURN;
3176     }
3177 
3178 #if !defined(CONFIG_USER_ONLY)
3179     gen_helper_monitor_call(cpu_env, o->addr1,
3180                             tcg_constant_i32(monitor_class));
3181 #endif
3182     /* Defaults to a NOP. */
3183     return DISAS_NEXT;
3184 }
3185 
3186 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3187 {
3188     o->out = o->in2;
3189     o->in2 = NULL;
3190     return DISAS_NEXT;
3191 }
3192 
3193 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3194 {
3195     int b2 = get_field(s, b2);
3196     TCGv ar1 = tcg_temp_new_i64();
3197 
3198     o->out = o->in2;
3199     o->in2 = NULL;
3200 
3201     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3202     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3203         tcg_gen_movi_i64(ar1, 0);
3204         break;
3205     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3206         tcg_gen_movi_i64(ar1, 1);
3207         break;
3208     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3209         if (b2) {
3210             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3211         } else {
3212             tcg_gen_movi_i64(ar1, 0);
3213         }
3214         break;
3215     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3216         tcg_gen_movi_i64(ar1, 2);
3217         break;
3218     }
3219 
3220     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3221     return DISAS_NEXT;
3222 }
3223 
3224 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3225 {
3226     o->out = o->in1;
3227     o->out2 = o->in2;
3228     o->in1 = NULL;
3229     o->in2 = NULL;
3230     return DISAS_NEXT;
3231 }
3232 
3233 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3234 {
3235     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3236 
3237     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3238     return DISAS_NEXT;
3239 }
3240 
3241 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3242 {
3243     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3244     return DISAS_NEXT;
3245 }
3246 
3247 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3248 {
3249     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3250 
3251     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3252     return DISAS_NEXT;
3253 }
3254 
3255 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3256 {
3257     int r1 = get_field(s, r1);
3258     int r2 = get_field(s, r2);
3259     TCGv_i32 t1, t2;
3260 
3261     /* r1 and r2 must be even.  */
3262     if (r1 & 1 || r2 & 1) {
3263         gen_program_exception(s, PGM_SPECIFICATION);
3264         return DISAS_NORETURN;
3265     }
3266 
3267     t1 = tcg_constant_i32(r1);
3268     t2 = tcg_constant_i32(r2);
3269     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3270     set_cc_static(s);
3271     return DISAS_NEXT;
3272 }
3273 
3274 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3275 {
3276     int r1 = get_field(s, r1);
3277     int r3 = get_field(s, r3);
3278     TCGv_i32 t1, t3;
3279 
3280     /* r1 and r3 must be even.  */
3281     if (r1 & 1 || r3 & 1) {
3282         gen_program_exception(s, PGM_SPECIFICATION);
3283         return DISAS_NORETURN;
3284     }
3285 
3286     t1 = tcg_constant_i32(r1);
3287     t3 = tcg_constant_i32(r3);
3288     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3289     set_cc_static(s);
3290     return DISAS_NEXT;
3291 }
3292 
3293 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3294 {
3295     int r1 = get_field(s, r1);
3296     int r3 = get_field(s, r3);
3297     TCGv_i32 t1, t3;
3298 
3299     /* r1 and r3 must be even.  */
3300     if (r1 & 1 || r3 & 1) {
3301         gen_program_exception(s, PGM_SPECIFICATION);
3302         return DISAS_NORETURN;
3303     }
3304 
3305     t1 = tcg_constant_i32(r1);
3306     t3 = tcg_constant_i32(r3);
3307     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3308     set_cc_static(s);
3309     return DISAS_NEXT;
3310 }
3311 
3312 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3313 {
3314     int r3 = get_field(s, r3);
3315     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3316     set_cc_static(s);
3317     return DISAS_NEXT;
3318 }
3319 
3320 #ifndef CONFIG_USER_ONLY
3321 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3322 {
3323     int r1 = get_field(s, l1);
3324     int r3 = get_field(s, r3);
3325     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3326     set_cc_static(s);
3327     return DISAS_NEXT;
3328 }
3329 
3330 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3331 {
3332     int r1 = get_field(s, l1);
3333     int r3 = get_field(s, r3);
3334     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3335     set_cc_static(s);
3336     return DISAS_NEXT;
3337 }
3338 #endif
3339 
3340 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3341 {
3342     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3343 
3344     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3345     return DISAS_NEXT;
3346 }
3347 
3348 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3349 {
3350     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3351 
3352     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3353     return DISAS_NEXT;
3354 }
3355 
3356 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3357 {
3358     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3359     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3360 
3361     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3362     set_cc_static(s);
3363     return DISAS_NEXT;
3364 }
3365 
3366 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3367 {
3368     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3369     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3370 
3371     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3372     set_cc_static(s);
3373     return DISAS_NEXT;
3374 }
3375 
3376 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3377 {
3378     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3379 
3380     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3381     return DISAS_NEXT;
3382 }
3383 
3384 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3385 {
3386     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3387     return DISAS_NEXT;
3388 }
3389 
3390 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3391 {
3392     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3393     return DISAS_NEXT;
3394 }
3395 
3396 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3397 {
3398     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3399     return DISAS_NEXT;
3400 }
3401 
3402 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3403 {
3404     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3405     return DISAS_NEXT;
3406 }
3407 
3408 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3409 {
3410     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3411     return DISAS_NEXT;
3412 }
3413 
3414 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3415 {
3416     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3417     return DISAS_NEXT;
3418 }
3419 
3420 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3421 {
3422     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3423     return DISAS_NEXT;
3424 }
3425 
3426 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3427 {
3428     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3429     return DISAS_NEXT;
3430 }
3431 
3432 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3433 {
3434     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3435     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3436     return DISAS_NEXT;
3437 }
3438 
3439 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3440 {
3441     TCGv_i64 r3 = load_freg(get_field(s, r3));
3442     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3443     return DISAS_NEXT;
3444 }
3445 
3446 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3447 {
3448     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3449     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3450     return DISAS_NEXT;
3451 }
3452 
3453 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3454 {
3455     TCGv_i64 r3 = load_freg(get_field(s, r3));
3456     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3457     return DISAS_NEXT;
3458 }
3459 
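/* LOAD NEGATIVE: out = -|in2|, i.e. force the result negative.  */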
3460 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3461 {
3462     TCGv_i64 z = tcg_constant_i64(0);
3463     TCGv_i64 n = tcg_temp_new_i64();
3464 
3465     tcg_gen_neg_i64(n, o->in2);
3466     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3467     return DISAS_NEXT;
3468 }
3469 
3470 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3471 {
3472     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3473     return DISAS_NEXT;
3474 }
3475 
3476 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3477 {
3478     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3479     return DISAS_NEXT;
3480 }
3481 
3482 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3483 {
3484     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3485     tcg_gen_mov_i64(o->out2, o->in2);
3486     return DISAS_NEXT;
3487 }
3488 
3489 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3490 {
3491     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3492 
3493     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3494     set_cc_static(s);
3495     return DISAS_NEXT;
3496 }
3497 
3498 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3499 {
3500     tcg_gen_neg_i64(o->out, o->in2);
3501     return DISAS_NEXT;
3502 }
3503 
3504 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3505 {
3506     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3507     return DISAS_NEXT;
3508 }
3509 
3510 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3511 {
3512     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3513     return DISAS_NEXT;
3514 }
3515 
3516 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3517 {
3518     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3519     tcg_gen_mov_i64(o->out2, o->in2);
3520     return DISAS_NEXT;
3521 }
3522 
3523 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3524 {
3525     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3526 
3527     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3528     set_cc_static(s);
3529     return DISAS_NEXT;
3530 }
3531 
3532 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3533 {
3534     tcg_gen_or_i64(o->out, o->in1, o->in2);
3535     return DISAS_NEXT;
3536 }
3537 
3538 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3539 {
3540     int shift = s->insn->data & 0xff;
3541     int size = s->insn->data >> 8;
3542     uint64_t mask = ((1ull << size) - 1) << shift;
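         /* E.g. for an insert into the high halfword of a 64-bit register
            (size = 16, shift = 48), mask = 0xffff000000000000ull.  */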
3543     TCGv_i64 t = tcg_temp_new_i64();
3544 
3545     tcg_gen_shli_i64(t, o->in2, shift);
3546     tcg_gen_or_i64(o->out, o->in1, t);
3547 
3548     /* Produce the CC from only the bits manipulated.  */
3549     tcg_gen_andi_i64(cc_dst, o->out, mask);
3550     set_cc_nz_u64(s, cc_dst);
3551     return DISAS_NEXT;
3552 }
3553 
3554 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3555 {
3556     o->in1 = tcg_temp_new_i64();
3557 
3558     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3559         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3560     } else {
3561         /* Perform the atomic operation in memory. */
3562         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3563                                     s->insn->data);
3564     }
3565 
3566     /* Also recompute for the atomic case; it is needed to set the CC. */
3567     tcg_gen_or_i64(o->out, o->in1, o->in2);
3568 
3569     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3570         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3571     }
3572     return DISAS_NEXT;
3573 }
3574 
3575 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3576 {
3577     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3578 
3579     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3580     return DISAS_NEXT;
3581 }
3582 
3583 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3584 {
3585     int l2 = get_field(s, l2) + 1;
3586     TCGv_i32 l;
3587 
3588     /* The length must not exceed 32 bytes.  */
3589     if (l2 > 32) {
3590         gen_program_exception(s, PGM_SPECIFICATION);
3591         return DISAS_NORETURN;
3592     }
3593     l = tcg_constant_i32(l2);
3594     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3595     return DISAS_NEXT;
3596 }
3597 
3598 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3599 {
3600     int l2 = get_field(s, l2) + 1;
3601     TCGv_i32 l;
3602 
3603     /* The length must be even and must not exceed 64 bytes.  */
3604     if ((l2 & 1) || (l2 > 64)) {
3605         gen_program_exception(s, PGM_SPECIFICATION);
3606         return DISAS_NORETURN;
3607     }
3608     l = tcg_constant_i32(l2);
3609     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3610     return DISAS_NEXT;
3611 }
3612 
3613 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3614 {
3615     const uint8_t m3 = get_field(s, m3);
3616 
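         /* With m3 bit 0 set (value 8) and the miscellaneous-instruction-
            extensions facility 3 installed, POPCNT returns the total
            population count of the operand; otherwise, per the PoO, it
            counts each byte separately, which the helper implements.  */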
3617     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3618         tcg_gen_ctpop_i64(o->out, o->in2);
3619     } else {
3620         gen_helper_popcnt(o->out, o->in2);
3621     }
3622     return DISAS_NEXT;
3623 }
3624 
3625 #ifndef CONFIG_USER_ONLY
3626 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3627 {
3628     gen_helper_ptlb(cpu_env);
3629     return DISAS_NEXT;
3630 }
3631 #endif
3632 
3633 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3634 {
3635     int i3 = get_field(s, i3);
3636     int i4 = get_field(s, i4);
3637     int i5 = get_field(s, i5);
3638     int do_zero = i4 & 0x80;
3639     uint64_t mask, imask, pmask;
3640     int pos, len, rot;
3641 
3642     /* Adjust the arguments for the specific insn.  */
3643     switch (s->fields.op2) {
3644     case 0x55: /* risbg */
3645     case 0x59: /* risbgn */
3646         i3 &= 63;
3647         i4 &= 63;
3648         pmask = ~0;
3649         break;
3650     case 0x5d: /* risbhg */
3651         i3 &= 31;
3652         i4 &= 31;
3653         pmask = 0xffffffff00000000ull;
3654         break;
3655     case 0x51: /* risblg */
3656         i3 = (i3 & 31) + 32;
3657         i4 = (i4 & 31) + 32;
3658         pmask = 0x00000000ffffffffull;
3659         break;
3660     default:
3661         g_assert_not_reached();
3662     }
3663 
3664     /* MASK is the set of bits to be inserted from R2. */
3665     if (i3 <= i4) {
3666         /* [0...i3---i4...63] */
3667         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3668     } else {
3669         /* [0---i4...i3---63] */
3670         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3671     }
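         /* E.g. i3 = 8, i4 = 15 selects the contiguous bits 8-15, giving
            mask = 0x00ff000000000000ull; the wrapped range i3 = 60, i4 = 3
            selects bits 60-63 and 0-3, giving 0xf00000000000000full.  */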
3672     /* For RISBLG/RISBHG, the wrapping is limited to the high/low word. */
3673     mask &= pmask;
3674 
3675     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3676        insns, we need to keep the other half of the register.  */
3677     imask = ~mask | ~pmask;
3678     if (do_zero) {
3679         imask = ~pmask;
3680     }
3681 
3682     len = i4 - i3 + 1;
3683     pos = 63 - i4;
3684     rot = i5 & 63;
3685 
3686     /* In some cases we can implement this with extract.  */
3687     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3688         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3689         return DISAS_NEXT;
3690     }
3691 
3692     /* In some cases we can implement this with deposit.  */
3693     if (len > 0 && (imask == 0 || ~mask == imask)) {
3694         /* Note that we rotate the bits to be inserted to the lsb, not to
3695            the position as described in the PoO.  */
3696         rot = (rot - pos) & 63;
3697     } else {
3698         pos = -1;
3699     }
3700 
3701     /* Rotate the input as necessary.  */
3702     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3703 
3704     /* Insert the selected bits into the output.  */
3705     if (pos >= 0) {
3706         if (imask == 0) {
3707             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3708         } else {
3709             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3710         }
3711     } else if (imask == 0) {
3712         tcg_gen_andi_i64(o->out, o->in2, mask);
3713     } else {
3714         tcg_gen_andi_i64(o->in2, o->in2, mask);
3715         tcg_gen_andi_i64(o->out, o->out, imask);
3716         tcg_gen_or_i64(o->out, o->out, o->in2);
3717     }
3718     return DISAS_NEXT;
3719 }
3720 
3721 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3722 {
3723     int i3 = get_field(s, i3);
3724     int i4 = get_field(s, i4);
3725     int i5 = get_field(s, i5);
3726     TCGv_i64 orig_out;
3727     uint64_t mask;
3728 
3729     /* If this is a test-only form, arrange to discard the result.  */
3730     if (i3 & 0x80) {
3731         tcg_debug_assert(o->out != NULL);
3732         orig_out = o->out;
3733         o->out = tcg_temp_new_i64();
3734         tcg_gen_mov_i64(o->out, orig_out);
3735     }
3736 
3737     i3 &= 63;
3738     i4 &= 63;
3739     i5 &= 63;
3740 
3741     /* MASK is the set of bits to be operated on from R2.
3742        Take care for I3/I4 wraparound.  */
3743     mask = ~0ull >> i3;
3744     if (i3 <= i4) {
3745         mask ^= ~0ull >> i4 >> 1;
3746     } else {
3747         mask |= ~(~0ull >> i4 >> 1);
3748     }
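         /* E.g. i3 = 32, i4 = 47 gives mask = 0x00000000ffff0000ull; the
            wrapped case i3 = 60, i4 = 3 gives 0xf00000000000000full.  */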
3749 
3750     /* Rotate the input as necessary.  */
3751     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3752 
3753     /* Operate.  */
3754     switch (s->fields.op2) {
3755     case 0x54: /* AND */
3756         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3757         tcg_gen_and_i64(o->out, o->out, o->in2);
3758         break;
3759     case 0x56: /* OR */
3760         tcg_gen_andi_i64(o->in2, o->in2, mask);
3761         tcg_gen_or_i64(o->out, o->out, o->in2);
3762         break;
3763     case 0x57: /* XOR */
3764         tcg_gen_andi_i64(o->in2, o->in2, mask);
3765         tcg_gen_xor_i64(o->out, o->out, o->in2);
3766         break;
3767     default:
3768         abort();
3769     }
3770 
3771     /* Set the CC.  */
3772     tcg_gen_andi_i64(cc_dst, o->out, mask);
3773     set_cc_nz_u64(s, cc_dst);
3774     return DISAS_NEXT;
3775 }
3776 
3777 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3778 {
3779     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3780     return DISAS_NEXT;
3781 }
3782 
3783 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3784 {
3785     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3786     return DISAS_NEXT;
3787 }
3788 
3789 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3790 {
3791     tcg_gen_bswap64_i64(o->out, o->in2);
3792     return DISAS_NEXT;
3793 }
3794 
3795 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3796 {
3797     TCGv_i32 t1 = tcg_temp_new_i32();
3798     TCGv_i32 t2 = tcg_temp_new_i32();
3799     TCGv_i32 to = tcg_temp_new_i32();
3800     tcg_gen_extrl_i64_i32(t1, o->in1);
3801     tcg_gen_extrl_i64_i32(t2, o->in2);
3802     tcg_gen_rotl_i32(to, t1, t2);
3803     tcg_gen_extu_i32_i64(o->out, to);
3804     return DISAS_NEXT;
3805 }
3806 
3807 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3808 {
3809     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3810     return DISAS_NEXT;
3811 }
3812 
3813 #ifndef CONFIG_USER_ONLY
3814 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3815 {
3816     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3817     set_cc_static(s);
3818     return DISAS_NEXT;
3819 }
3820 
3821 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3822 {
3823     gen_helper_sacf(cpu_env, o->in2);
3824     /* Addressing mode has changed, so end the block.  */
3825     return DISAS_TOO_MANY;
3826 }
3827 #endif
3828 
3829 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3830 {
3831     int sam = s->insn->data;
3832     TCGv_i64 tsam;
3833     uint64_t mask;
3834 
3835     switch (sam) {
3836     case 0:
3837         mask = 0xffffff;
3838         break;
3839     case 1:
3840         mask = 0x7fffffff;
3841         break;
3842     default:
3843         mask = -1;
3844         break;
3845     }
3846 
3847     /* Bizarre but true, we check the address of the current insn for the
3848        specification exception, not the next to be executed.  Thus the PoO
3849        documents that Bad Things Happen two bytes before the end.  */
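         /* E.g. a SAM24 located at or above 16 MB already takes the
            exception, even though only the next instruction would run in
            the new mode.  */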
3850     if (s->base.pc_next & ~mask) {
3851         gen_program_exception(s, PGM_SPECIFICATION);
3852         return DISAS_NORETURN;
3853     }
3854     s->pc_tmp &= mask;
3855 
3856     tsam = tcg_constant_i64(sam);
3857     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3858 
3859     /* Always exit the TB, since we (may have) changed execution mode.  */
3860     return DISAS_TOO_MANY;
3861 }
3862 
3863 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3864 {
3865     int r1 = get_field(s, r1);
3866     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3867     return DISAS_NEXT;
3868 }
3869 
3870 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3871 {
3872     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3873     return DISAS_NEXT;
3874 }
3875 
3876 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3877 {
3878     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3879     return DISAS_NEXT;
3880 }
3881 
3882 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3883 {
3884     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3885     return DISAS_NEXT;
3886 }
3887 
3888 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3889 {
3890     gen_helper_sqeb(o->out, cpu_env, o->in2);
3891     return DISAS_NEXT;
3892 }
3893 
3894 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3895 {
3896     gen_helper_sqdb(o->out, cpu_env, o->in2);
3897     return DISAS_NEXT;
3898 }
3899 
3900 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3901 {
3902     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3903     return DISAS_NEXT;
3904 }
3905 
3906 #ifndef CONFIG_USER_ONLY
3907 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3908 {
3909     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3910     set_cc_static(s);
3911     return DISAS_NEXT;
3912 }
3913 
3914 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3915 {
3916     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3917     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3918 
3919     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3920     set_cc_static(s);
3921     return DISAS_NEXT;
3922 }
3923 #endif
3924 
3925 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3926 {
3927     DisasCompare c;
3928     TCGv_i64 a, h;
3929     TCGLabel *lab;
3930     int r1;
3931 
3932     disas_jcc(s, &c, get_field(s, m3));
3933 
3934     /* We want to store when the condition is fulfilled, so branch
3935        out when it's not.  */
3936     c.cond = tcg_invert_cond(c.cond);
3937 
3938     lab = gen_new_label();
3939     if (c.is_64) {
3940         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3941     } else {
3942         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3943     }
3944 
3945     r1 = get_field(s, r1);
3946     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3947     switch (s->insn->data) {
3948     case 1: /* STOCG */
3949         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3950         break;
3951     case 0: /* STOC */
3952         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3953         break;
3954     case 2: /* STOCFH */
3955         h = tcg_temp_new_i64();
3956         tcg_gen_shri_i64(h, regs[r1], 32);
3957         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3958         break;
3959     default:
3960         g_assert_not_reached();
3961     }
3962 
3963     gen_set_label(lab);
3964     return DISAS_NEXT;
3965 }
3966 
3967 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3968 {
3969     TCGv_i64 t;
3970     uint64_t sign = 1ull << s->insn->data;
3971     if (s->insn->data == 31) {
3972         t = tcg_temp_new_i64();
3973         tcg_gen_shli_i64(t, o->in1, 32);
3974     } else {
3975         t = o->in1;
3976     }
3977     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3978     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3979     /* The arithmetic left shift is curious in that it does not affect
3980        the sign bit.  Copy that over from the source unchanged.  */
3981     tcg_gen_andi_i64(o->out, o->out, ~sign);
3982     tcg_gen_andi_i64(o->in1, o->in1, sign);
3983     tcg_gen_or_i64(o->out, o->out, o->in1);
3984     return DISAS_NEXT;
3985 }
3986 
3987 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3988 {
3989     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3990     return DISAS_NEXT;
3991 }
3992 
3993 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
3994 {
3995     tcg_gen_sar_i64(o->out, o->in1, o->in2);
3996     return DISAS_NEXT;
3997 }
3998 
3999 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4000 {
4001     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4002     return DISAS_NEXT;
4003 }
4004 
4005 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4006 {
4007     gen_helper_sfpc(cpu_env, o->in2);
4008     return DISAS_NEXT;
4009 }
4010 
4011 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4012 {
4013     gen_helper_sfas(cpu_env, o->in2);
4014     return DISAS_NEXT;
4015 }
4016 
4017 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4018 {
4019     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4020     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4021     gen_helper_srnm(cpu_env, o->addr1);
4022     return DISAS_NEXT;
4023 }
4024 
4025 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4026 {
4027     /* Bits 0-55 are ignored. */
4028     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4029     gen_helper_srnm(cpu_env, o->addr1);
4030     return DISAS_NEXT;
4031 }
4032 
4033 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4034 {
4035     TCGv_i64 tmp = tcg_temp_new_i64();
4036 
4037     /* Bits other than 61-63 are ignored. */
4038     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4039 
4040     /* No need to call a helper, we don't implement DFP. */
4041     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4042     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4043     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4044     return DISAS_NEXT;
4045 }
4046 
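     /* SET PROGRAM MASK: per the PoO, bits 2-3 of the 32-bit first operand
        (IBM bit numbering) become the condition code and bits 4-7 the new
        program mask.  */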
4047 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4048 {
4049     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4050     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4051     set_cc_static(s);
4052 
4053     tcg_gen_shri_i64(o->in1, o->in1, 24);
4054     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4055     return DISAS_NEXT;
4056 }
4057 
4058 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4059 {
4060     int b1 = get_field(s, b1);
4061     int d1 = get_field(s, d1);
4062     int b2 = get_field(s, b2);
4063     int d2 = get_field(s, d2);
4064     int r3 = get_field(s, r3);
4065     TCGv_i64 tmp = tcg_temp_new_i64();
4066 
4067     /* fetch all operands first */
4068     o->in1 = tcg_temp_new_i64();
4069     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4070     o->in2 = tcg_temp_new_i64();
4071     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4072     o->addr1 = tcg_temp_new_i64();
4073     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4074 
4075     /* load the third operand into r3 before modifying anything */
4076     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4077 
4078     /* subtract CPU timer from first operand and store in GR0 */
4079     gen_helper_stpt(tmp, cpu_env);
4080     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4081 
4082     /* store second operand in GR1 */
4083     tcg_gen_mov_i64(regs[1], o->in2);
4084     return DISAS_NEXT;
4085 }
4086 
4087 #ifndef CONFIG_USER_ONLY
4088 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4089 {
4090     tcg_gen_shri_i64(o->in2, o->in2, 4);
4091     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4092     return DISAS_NEXT;
4093 }
4094 
4095 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4096 {
4097     gen_helper_sske(cpu_env, o->in1, o->in2);
4098     return DISAS_NEXT;
4099 }
4100 
4101 static void gen_check_psw_mask(DisasContext *s)
4102 {
4103     TCGv_i64 reserved = tcg_temp_new_i64();
4104     TCGLabel *ok = gen_new_label();
4105 
4106     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4107     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4108     gen_program_exception(s, PGM_SPECIFICATION);
4109     gen_set_label(ok);
4110 }
4111 
4112 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4113 {
4114     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4115 
4116     gen_check_psw_mask(s);
4117 
4118     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4119     s->exit_to_mainloop = true;
4120     return DISAS_TOO_MANY;
4121 }
4122 
4123 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4124 {
4125     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4126     return DISAS_NEXT;
4127 }
4128 #endif
4129 
4130 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4131 {
4132     gen_helper_stck(o->out, cpu_env);
4133     /* ??? We don't implement clock states.  */
4134     gen_op_movi_cc(s, 0);
4135     return DISAS_NEXT;
4136 }
4137 
4138 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4139 {
4140     TCGv_i64 c1 = tcg_temp_new_i64();
4141     TCGv_i64 c2 = tcg_temp_new_i64();
4142     TCGv_i64 todpr = tcg_temp_new_i64();
4143     gen_helper_stck(c1, cpu_env);
4144     /* 16-bit value stored in a uint32_t (only valid bits set) */
4145     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4146     /* Shift the 64-bit value into its place as a zero-extended
4147        104-bit value.  Note that "bit positions 64-103 are always
4148        non-zero so that they compare differently to STCK"; we set
4149        the least significant bit to 1.  */
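         /* The 16 bytes stored are thus: a zero epoch-index byte plus TOD
            bits 0-55 (c1), then TOD bits 56-63, the always-set bit, and
            the TOD programmable field (c2).  */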
4150     tcg_gen_shli_i64(c2, c1, 56);
4151     tcg_gen_shri_i64(c1, c1, 8);
4152     tcg_gen_ori_i64(c2, c2, 0x10000);
4153     tcg_gen_or_i64(c2, c2, todpr);
4154     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4155     tcg_gen_addi_i64(o->in2, o->in2, 8);
4156     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4157     /* ??? We don't implement clock states.  */
4158     gen_op_movi_cc(s, 0);
4159     return DISAS_NEXT;
4160 }
4161 
4162 #ifndef CONFIG_USER_ONLY
4163 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4164 {
4165     gen_helper_sck(cc_op, cpu_env, o->in2);
4166     set_cc_static(s);
4167     return DISAS_NEXT;
4168 }
4169 
4170 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4171 {
4172     gen_helper_sckc(cpu_env, o->in2);
4173     return DISAS_NEXT;
4174 }
4175 
4176 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4177 {
4178     gen_helper_sckpf(cpu_env, regs[0]);
4179     return DISAS_NEXT;
4180 }
4181 
4182 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4183 {
4184     gen_helper_stckc(o->out, cpu_env);
4185     return DISAS_NEXT;
4186 }
4187 
4188 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4189 {
4190     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4191     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4192 
4193     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4194     return DISAS_NEXT;
4195 }
4196 
4197 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4198 {
4199     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4200     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4201 
4202     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4203     return DISAS_NEXT;
4204 }
4205 
4206 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4207 {
4208     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4209     return DISAS_NEXT;
4210 }
4211 
4212 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4213 {
4214     gen_helper_spt(cpu_env, o->in2);
4215     return DISAS_NEXT;
4216 }
4217 
4218 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4219 {
4220     gen_helper_stfl(cpu_env);
4221     return DISAS_NEXT;
4222 }
4223 
4224 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4225 {
4226     gen_helper_stpt(o->out, cpu_env);
4227     return DISAS_NEXT;
4228 }
4229 
4230 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4231 {
4232     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4233     set_cc_static(s);
4234     return DISAS_NEXT;
4235 }
4236 
4237 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4238 {
4239     gen_helper_spx(cpu_env, o->in2);
4240     return DISAS_NEXT;
4241 }
4242 
4243 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4244 {
4245     gen_helper_xsch(cpu_env, regs[1]);
4246     set_cc_static(s);
4247     return DISAS_NEXT;
4248 }
4249 
4250 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4251 {
4252     gen_helper_csch(cpu_env, regs[1]);
4253     set_cc_static(s);
4254     return DISAS_NEXT;
4255 }
4256 
4257 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4258 {
4259     gen_helper_hsch(cpu_env, regs[1]);
4260     set_cc_static(s);
4261     return DISAS_NEXT;
4262 }
4263 
4264 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4265 {
4266     gen_helper_msch(cpu_env, regs[1], o->in2);
4267     set_cc_static(s);
4268     return DISAS_NEXT;
4269 }
4270 
4271 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4272 {
4273     gen_helper_rchp(cpu_env, regs[1]);
4274     set_cc_static(s);
4275     return DISAS_NEXT;
4276 }
4277 
4278 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4279 {
4280     gen_helper_rsch(cpu_env, regs[1]);
4281     set_cc_static(s);
4282     return DISAS_NEXT;
4283 }
4284 
4285 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4286 {
4287     gen_helper_sal(cpu_env, regs[1]);
4288     return DISAS_NEXT;
4289 }
4290 
4291 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4292 {
4293     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4294     return DISAS_NEXT;
4295 }
4296 
4297 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4298 {
4299     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4300     gen_op_movi_cc(s, 3);
4301     return DISAS_NEXT;
4302 }
4303 
4304 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4305 {
4306     /* The instruction is suppressed if not provided. */
4307     return DISAS_NEXT;
4308 }
4309 
4310 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4311 {
4312     gen_helper_ssch(cpu_env, regs[1], o->in2);
4313     set_cc_static(s);
4314     return DISAS_NEXT;
4315 }
4316 
4317 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4318 {
4319     gen_helper_stsch(cpu_env, regs[1], o->in2);
4320     set_cc_static(s);
4321     return DISAS_NEXT;
4322 }
4323 
4324 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4325 {
4326     gen_helper_stcrw(cpu_env, o->in2);
4327     set_cc_static(s);
4328     return DISAS_NEXT;
4329 }
4330 
4331 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4332 {
4333     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4334     set_cc_static(s);
4335     return DISAS_NEXT;
4336 }
4337 
4338 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4339 {
4340     gen_helper_tsch(cpu_env, regs[1], o->in2);
4341     set_cc_static(s);
4342     return DISAS_NEXT;
4343 }
4344 
4345 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4346 {
4347     gen_helper_chsc(cpu_env, o->in2);
4348     set_cc_static(s);
4349     return DISAS_NEXT;
4350 }
4351 
4352 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4353 {
4354     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4355     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4356     return DISAS_NEXT;
4357 }
4358 
4359 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4360 {
4361     uint64_t i2 = get_field(s, i2);
4362     TCGv_i64 t;
4363 
4364     /* It is important to do what the instruction name says: STORE THEN.
4365        If we let the output hook perform the store, then if we fault and
4366        restart we'll have the wrong SYSTEM MASK in place.  */
4367     t = tcg_temp_new_i64();
4368     tcg_gen_shri_i64(t, psw_mask, 56);
4369     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4370 
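         /* Opcode 0xac is STNSM, which ANDs the immediate into the system
            mask; 0xad is STOSM, which ORs it in.  */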
4371     if (s->fields.op == 0xac) {
4372         tcg_gen_andi_i64(psw_mask, psw_mask,
4373                          (i2 << 56) | 0x00ffffffffffffffull);
4374     } else {
4375         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4376     }
4377 
4378     gen_check_psw_mask(s);
4379 
4380     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4381     s->exit_to_mainloop = true;
4382     return DISAS_TOO_MANY;
4383 }
4384 
4385 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4386 {
4387     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4388 
4389     if (s->base.tb->flags & FLAG_MASK_PER) {
4390         update_psw_addr(s);
4391         gen_helper_per_store_real(cpu_env);
4392     }
4393     return DISAS_NEXT;
4394 }
4395 #endif
4396 
4397 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4398 {
4399     gen_helper_stfle(cc_op, cpu_env, o->in2);
4400     set_cc_static(s);
4401     return DISAS_NEXT;
4402 }
4403 
4404 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4405 {
4406     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4407     return DISAS_NEXT;
4408 }
4409 
4410 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4411 {
4412     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4413     return DISAS_NEXT;
4414 }
4415 
4416 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4417 {
4418     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4419                        MO_TEUL | s->insn->data);
4420     return DISAS_NEXT;
4421 }
4422 
4423 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4424 {
4425     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4426                         MO_TEUQ | s->insn->data);
4427     return DISAS_NEXT;
4428 }
4429 
4430 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4431 {
4432     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4433     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4434 
4435     gen_helper_stam(cpu_env, r1, o->in2, r3);
4436     return DISAS_NEXT;
4437 }
4438 
4439 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4440 {
4441     int m3 = get_field(s, m3);
4442     int pos, base = s->insn->data;
4443     TCGv_i64 tmp = tcg_temp_new_i64();
4444 
4445     pos = base + ctz32(m3) * 8;
4446     switch (m3) {
4447     case 0xf:
4448         /* Effectively a 32-bit store.  */
4449         tcg_gen_shri_i64(tmp, o->in1, pos);
4450         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4451         break;
4452 
4453     case 0xc:
4454     case 0x6:
4455     case 0x3:
4456         /* Effectively a 16-bit store.  */
4457         tcg_gen_shri_i64(tmp, o->in1, pos);
4458         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4459         break;
4460 
4461     case 0x8:
4462     case 0x4:
4463     case 0x2:
4464     case 0x1:
4465         /* Effectively an 8-bit store.  */
4466         tcg_gen_shri_i64(tmp, o->in1, pos);
4467         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4468         break;
4469 
4470     default:
4471         /* This is going to be a sequence of shifts and stores.  */
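             /* E.g. m3 = 0xa stores the first and third bytes of the 32-bit
                field at two consecutive byte addresses.  */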
4472         pos = base + 32 - 8;
4473         while (m3) {
4474             if (m3 & 0x8) {
4475                 tcg_gen_shri_i64(tmp, o->in1, pos);
4476                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4477                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4478             }
4479             m3 = (m3 << 1) & 0xf;
4480             pos -= 8;
4481         }
4482         break;
4483     }
4484     return DISAS_NEXT;
4485 }
4486 
4487 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4488 {
4489     int r1 = get_field(s, r1);
4490     int r3 = get_field(s, r3);
4491     int size = s->insn->data;
4492     TCGv_i64 tsize = tcg_constant_i64(size);
4493 
4494     while (1) {
4495         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4496                             size == 8 ? MO_TEUQ : MO_TEUL);
4497         if (r1 == r3) {
4498             break;
4499         }
4500         tcg_gen_add_i64(o->in2, o->in2, tsize);
4501         r1 = (r1 + 1) & 15;
4502     }
4503 
4504     return DISAS_NEXT;
4505 }
4506 
4507 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4508 {
4509     int r1 = get_field(s, r1);
4510     int r3 = get_field(s, r3);
4511     TCGv_i64 t = tcg_temp_new_i64();
4512     TCGv_i64 t4 = tcg_constant_i64(4);
4513     TCGv_i64 t32 = tcg_constant_i64(32);
4514 
4515     while (1) {
4516         tcg_gen_shr_i64(t, regs[r1], t32);
4517         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4518         if (r1 == r3) {
4519             break;
4520         }
4521         tcg_gen_add_i64(o->in2, o->in2, t4);
4522         r1 = (r1 + 1) & 15;
4523     }
4524     return DISAS_NEXT;
4525 }
4526 
4527 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4528 {
4529     TCGv_i128 t16 = tcg_temp_new_i128();
4530 
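         /* o->out is the even (leftmost) register of the pair and o->out2
            the odd one; tcg_gen_concat_i64_i128 takes (low, high).  */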
4531     tcg_gen_concat_i64_i128(t16, o->out2, o->out);
4532     tcg_gen_qemu_st_i128(t16, o->in2, get_mem_index(s),
4533                          MO_TE | MO_128 | MO_ALIGN);
4534     return DISAS_NEXT;
4535 }
4536 
4537 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4538 {
4539     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4540     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4541 
4542     gen_helper_srst(cpu_env, r1, r2);
4543     set_cc_static(s);
4544     return DISAS_NEXT;
4545 }
4546 
4547 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4548 {
4549     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4550     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4551 
4552     gen_helper_srstu(cpu_env, r1, r2);
4553     set_cc_static(s);
4554     return DISAS_NEXT;
4555 }
4556 
4557 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4558 {
4559     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4560     return DISAS_NEXT;
4561 }
4562 
4563 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4564 {
4565     tcg_gen_movi_i64(cc_src, 0);
4566     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4567     return DISAS_NEXT;
4568 }
4569 
4570 /* Compute borrow (0, -1) into cc_src. */
4571 static void compute_borrow(DisasContext *s)
4572 {
4573     switch (s->cc_op) {
4574     case CC_OP_SUBU:
4575         /* The borrow value is already in cc_src (0,-1). */
4576         break;
4577     default:
4578         gen_op_calc_cc(s);
4579         /* fall through */
4580     case CC_OP_STATIC:
4581         /* The carry flag is the msb of CC; compute into cc_src. */
4582         tcg_gen_extu_i32_i64(cc_src, cc_op);
4583         tcg_gen_shri_i64(cc_src, cc_src, 1);
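             /* E.g. CC 2 or 3 (carry set) yields 1, CC 0 or 1 yields 0;
                the fall-through below converts that to borrow 0 or -1.  */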
4584         /* fall through */
4585     case CC_OP_ADDU:
4586         /* Convert carry (1,0) to borrow (0,-1). */
4587         tcg_gen_subi_i64(cc_src, cc_src, 1);
4588         break;
4589     }
4590 }
4591 
4592 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4593 {
4594     compute_borrow(s);
4595 
4596     /* Borrow is {0, -1}, so add to subtract. */
4597     tcg_gen_add_i64(o->out, o->in1, cc_src);
4598     tcg_gen_sub_i64(o->out, o->out, o->in2);
4599     return DISAS_NEXT;
4600 }
4601 
4602 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4603 {
4604     compute_borrow(s);
4605 
4606     /*
4607      * Borrow is {0, -1}, so add to subtract; replicate the
4608      * borrow input to produce 128-bit -1 for the addition.
4609      */
4610     TCGv_i64 zero = tcg_constant_i64(0);
4611     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4612     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4613 
4614     return DISAS_NEXT;
4615 }
4616 
4617 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4618 {
4619     TCGv_i32 t;
4620 
4621     update_psw_addr(s);
4622     update_cc_op(s);
4623 
4624     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4625     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4626 
4627     t = tcg_constant_i32(s->ilen);
4628     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4629 
4630     gen_exception(EXCP_SVC);
4631     return DISAS_NORETURN;
4632 }
4633 
4634 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4635 {
4636     int cc = 0;
4637 
4638     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4639     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4640     gen_op_movi_cc(s, cc);
4641     return DISAS_NEXT;
4642 }
4643 
4644 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4645 {
4646     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4647     set_cc_static(s);
4648     return DISAS_NEXT;
4649 }
4650 
4651 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4652 {
4653     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4654     set_cc_static(s);
4655     return DISAS_NEXT;
4656 }
4657 
4658 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4659 {
4660     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4661     set_cc_static(s);
4662     return DISAS_NEXT;
4663 }
4664 
4665 #ifndef CONFIG_USER_ONLY
4666 
4667 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4668 {
4669     gen_helper_testblock(cc_op, cpu_env, o->in2);
4670     set_cc_static(s);
4671     return DISAS_NEXT;
4672 }
4673 
4674 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4675 {
4676     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4677     set_cc_static(s);
4678     return DISAS_NEXT;
4679 }
4680 
4681 #endif
4682 
4683 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4684 {
4685     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4686 
4687     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4688     set_cc_static(s);
4689     return DISAS_NEXT;
4690 }
4691 
4692 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4693 {
4694     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4695 
4696     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4697     set_cc_static(s);
4698     return DISAS_NEXT;
4699 }
4700 
4701 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4702 {
4703     TCGv_i128 pair = tcg_temp_new_i128();
4704 
4705     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4706     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4707     set_cc_static(s);
4708     return DISAS_NEXT;
4709 }
4710 
4711 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4712 {
4713     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4714 
4715     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4716     set_cc_static(s);
4717     return DISAS_NEXT;
4718 }
4719 
4720 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4721 {
4722     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4723 
4724     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4725     set_cc_static(s);
4726     return DISAS_NEXT;
4727 }
4728 
4729 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4730 {
4731     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4732     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4733     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4734     TCGv_i32 tst = tcg_temp_new_i32();
4735     int m3 = get_field(s, m3);
4736 
4737     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4738         m3 = 0;
4739     }
4740     if (m3 & 1) {
4741         tcg_gen_movi_i32(tst, -1);
4742     } else {
4743         tcg_gen_extrl_i64_i32(tst, regs[0]);
4744         if (s->insn->opc & 3) {
4745             tcg_gen_ext8u_i32(tst, tst);
4746         } else {
4747             tcg_gen_ext16u_i32(tst, tst);
4748         }
4749     }
4750     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4751 
4752     set_cc_static(s);
4753     return DISAS_NEXT;
4754 }
4755 
4756 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4757 {
4758     TCGv_i32 t1 = tcg_temp_new_i32();
4759 
4760     tcg_gen_atomic_xchg_i32(t1, o->in2, tcg_constant_i32(0xff),
                                  get_mem_index(s), MO_UB);
4761     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4762     set_cc_static(s);
4763     return DISAS_NEXT;
4764 }
4765 
4766 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4767 {
4768     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4769 
4770     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4771     return DISAS_NEXT;
4772 }
4773 
4774 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4775 {
4776     int l1 = get_field(s, l1) + 1;
4777     TCGv_i32 l;
4778 
4779     /* The length must not exceed 32 bytes.  */
4780     if (l1 > 32) {
4781         gen_program_exception(s, PGM_SPECIFICATION);
4782         return DISAS_NORETURN;
4783     }
4784     l = tcg_constant_i32(l1);
4785     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4786     set_cc_static(s);
4787     return DISAS_NEXT;
4788 }
4789 
4790 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4791 {
4792     int l1 = get_field(s, l1) + 1;
4793     TCGv_i32 l;
4794 
4795     /* The length must be even and must not exceed 64 bytes.  */
4796     if ((l1 & 1) || (l1 > 64)) {
4797         gen_program_exception(s, PGM_SPECIFICATION);
4798         return DISAS_NORETURN;
4799     }
4800     l = tcg_constant_i32(l1);
4801     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4802     set_cc_static(s);
4803     return DISAS_NEXT;
4804 }
4805 
4807 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4808 {
4809     int d1 = get_field(s, d1);
4810     int d2 = get_field(s, d2);
4811     int b1 = get_field(s, b1);
4812     int b2 = get_field(s, b2);
4813     int l = get_field(s, l1);
4814     TCGv_i32 t32;
4815 
4816     o->addr1 = get_address(s, 0, b1, d1);
4817 
4818     /* If the addresses are identical, this is a store/memset of zero.  */
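         /* E.g. an 11-byte clear becomes one 8-byte, one 2-byte and one
            1-byte store of zero.  */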
4819     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4820         o->in2 = tcg_constant_i64(0);
4821 
4822         l++;
4823         while (l >= 8) {
4824             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4825             l -= 8;
4826             if (l > 0) {
4827                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4828             }
4829         }
4830         if (l >= 4) {
4831             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4832             l -= 4;
4833             if (l > 0) {
4834                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4835             }
4836         }
4837         if (l >= 2) {
4838             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4839             l -= 2;
4840             if (l > 0) {
4841                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4842             }
4843         }
4844         if (l) {
4845             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4846         }
4847         gen_op_movi_cc(s, 0);
4848         return DISAS_NEXT;
4849     }
4850 
4851     /* But in general we'll defer to a helper.  */
4852     o->in2 = get_address(s, 0, b2, d2);
4853     t32 = tcg_constant_i32(l);
4854     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4855     set_cc_static(s);
4856     return DISAS_NEXT;
4857 }
4858 
4859 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4860 {
4861     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4862     return DISAS_NEXT;
4863 }
4864 
4865 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4866 {
4867     int shift = s->insn->data & 0xff;
4868     int size = s->insn->data >> 8;
4869     uint64_t mask = ((1ull << size) - 1) << shift;
4870     TCGv_i64 t = tcg_temp_new_i64();
4871 
4872     tcg_gen_shli_i64(t, o->in2, shift);
4873     tcg_gen_xor_i64(o->out, o->in1, t);
4874 
4875     /* Produce the CC from only the bits manipulated.  */
4876     tcg_gen_andi_i64(cc_dst, o->out, mask);
4877     set_cc_nz_u64(s, cc_dst);
4878     return DISAS_NEXT;
4879 }
4880 
4881 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4882 {
4883     o->in1 = tcg_temp_new_i64();
4884 
4885     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4886         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4887     } else {
4888         /* Perform the atomic operation in memory. */
4889         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4890                                      s->insn->data);
4891     }
4892 
4893     /* Also recompute for the atomic case; it is needed to set the CC. */
4894     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4895 
4896     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4897         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4898     }
4899     return DISAS_NEXT;
4900 }
4901 
4902 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4903 {
4904     o->out = tcg_constant_i64(0);
4905     return DISAS_NEXT;
4906 }
4907 
4908 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4909 {
4910     o->out = tcg_constant_i64(0);
4911     o->out2 = o->out;
4912     return DISAS_NEXT;
4913 }
4914 
4915 #ifndef CONFIG_USER_ONLY
4916 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4917 {
4918     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4919 
4920     gen_helper_clp(cpu_env, r2);
4921     set_cc_static(s);
4922     return DISAS_NEXT;
4923 }
4924 
4925 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4926 {
4927     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4928     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4929 
4930     gen_helper_pcilg(cpu_env, r1, r2);
4931     set_cc_static(s);
4932     return DISAS_NEXT;
4933 }
4934 
4935 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4936 {
4937     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4938     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4939 
4940     gen_helper_pcistg(cpu_env, r1, r2);
4941     set_cc_static(s);
4942     return DISAS_NEXT;
4943 }
4944 
4945 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4946 {
4947     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4948     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4949 
4950     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4951     set_cc_static(s);
4952     return DISAS_NEXT;
4953 }
4954 
4955 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4956 {
4957     gen_helper_sic(cpu_env, o->in1, o->in2);
4958     return DISAS_NEXT;
4959 }
4960 
4961 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4962 {
4963     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4964     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4965 
4966     gen_helper_rpcit(cpu_env, r1, r2);
4967     set_cc_static(s);
4968     return DISAS_NEXT;
4969 }
4970 
4971 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4972 {
4973     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4974     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4975     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4976 
4977     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4978     set_cc_static(s);
4979     return DISAS_NEXT;
4980 }
4981 
4982 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4983 {
4984     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4985     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4986 
4987     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4988     set_cc_static(s);
4989     return DISAS_NEXT;
4990 }
4991 #endif
4992 
4993 #include "translate_vx.c.inc"
4994 
4995 /* ====================================================================== */
4996 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
4997    the original inputs), update the various cc data structures in order to
4998    be able to compute the new condition code.  */
4999 
5000 static void cout_abs32(DisasContext *s, DisasOps *o)
5001 {
5002     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5003 }
5004 
5005 static void cout_abs64(DisasContext *s, DisasOps *o)
5006 {
5007     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5008 }
5009 
5010 static void cout_adds32(DisasContext *s, DisasOps *o)
5011 {
5012     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5013 }
5014 
5015 static void cout_adds64(DisasContext *s, DisasOps *o)
5016 {
5017     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5018 }
5019 
5020 static void cout_addu32(DisasContext *s, DisasOps *o)
5021 {
5022     tcg_gen_shri_i64(cc_src, o->out, 32);
5023     tcg_gen_ext32u_i64(cc_dst, o->out);
5024     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5025 }
5026 
5027 static void cout_addu64(DisasContext *s, DisasOps *o)
5028 {
5029     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5030 }
5031 
5032 static void cout_cmps32(DisasContext *s, DisasOps *o)
5033 {
5034     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5035 }
5036 
5037 static void cout_cmps64(DisasContext *s, DisasOps *o)
5038 {
5039     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5040 }
5041 
5042 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5043 {
5044     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5045 }
5046 
5047 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5048 {
5049     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5050 }
5051 
5052 static void cout_f32(DisasContext *s, DisasOps *o)
5053 {
5054     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5055 }
5056 
5057 static void cout_f64(DisasContext *s, DisasOps *o)
5058 {
5059     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5060 }
5061 
5062 static void cout_f128(DisasContext *s, DisasOps *o)
5063 {
5064     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5065 }
5066 
5067 static void cout_nabs32(DisasContext *s, DisasOps *o)
5068 {
5069     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5070 }
5071 
5072 static void cout_nabs64(DisasContext *s, DisasOps *o)
5073 {
5074     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5075 }
5076 
5077 static void cout_neg32(DisasContext *s, DisasOps *o)
5078 {
5079     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5080 }
5081 
5082 static void cout_neg64(DisasContext *s, DisasOps *o)
5083 {
5084     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5085 }
5086 
5087 static void cout_nz32(DisasContext *s, DisasOps *o)
5088 {
5089     tcg_gen_ext32u_i64(cc_dst, o->out);
5090     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5091 }
5092 
5093 static void cout_nz64(DisasContext *s, DisasOps *o)
5094 {
5095     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5096 }
5097 
5098 static void cout_s32(DisasContext *s, DisasOps *o)
5099 {
5100     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5101 }
5102 
5103 static void cout_s64(DisasContext *s, DisasOps *o)
5104 {
5105     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5106 }
5107 
5108 static void cout_subs32(DisasContext *s, DisasOps *o)
5109 {
5110     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5111 }
5112 
5113 static void cout_subs64(DisasContext *s, DisasOps *o)
5114 {
5115     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5116 }
5117 
5118 static void cout_subu32(DisasContext *s, DisasOps *o)
5119 {
5120     tcg_gen_sari_i64(cc_src, o->out, 32);
5121     tcg_gen_ext32u_i64(cc_dst, o->out);
5122     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5123 }
5124 
5125 static void cout_subu64(DisasContext *s, DisasOps *o)
5126 {
5127     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5128 }
5129 
5130 static void cout_tm32(DisasContext *s, DisasOps *o)
5131 {
5132     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5133 }
5134 
5135 static void cout_tm64(DisasContext *s, DisasOps *o)
5136 {
5137     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5138 }
5139 
5140 static void cout_muls32(DisasContext *s, DisasOps *o)
5141 {
5142     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5143 }
5144 
5145 static void cout_muls64(DisasContext *s, DisasOps *o)
5146 {
5147     /* out contains "high" part, out2 contains "low" part of 128-bit result */
5148     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5149 }
5150 
5151 /* ====================================================================== */
5152 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5153    with the TCG register to which we will write.  Used in combination with
5154    the "wout" generators, in some cases we need a new temporary, and in
5155    some cases we can write to a TCG global.  */
5156 
5157 static void prep_new(DisasContext *s, DisasOps *o)
5158 {
5159     o->out = tcg_temp_new_i64();
5160 }
5161 #define SPEC_prep_new 0
5162 
5163 static void prep_new_P(DisasContext *s, DisasOps *o)
5164 {
5165     o->out = tcg_temp_new_i64();
5166     o->out2 = tcg_temp_new_i64();
5167 }
5168 #define SPEC_prep_new_P 0
5169 
5170 static void prep_new_x(DisasContext *s, DisasOps *o)
5171 {
5172     o->out_128 = tcg_temp_new_i128();
5173 }
5174 #define SPEC_prep_new_x 0
5175 
5176 static void prep_r1(DisasContext *s, DisasOps *o)
5177 {
5178     o->out = regs[get_field(s, r1)];
5179 }
5180 #define SPEC_prep_r1 0
5181 
5182 static void prep_r1_P(DisasContext *s, DisasOps *o)
5183 {
5184     int r1 = get_field(s, r1);
5185     o->out = regs[r1];
5186     o->out2 = regs[r1 + 1];
5187 }
5188 #define SPEC_prep_r1_P SPEC_r1_even
5189 
5190 static void prep_x1(DisasContext *s, DisasOps *o)
5191 {
5192     o->out_128 = load_freg_128(get_field(s, r1));
5193 }
5194 #define SPEC_prep_x1 SPEC_r1_f128
5195 
5196 /* ====================================================================== */
5197 /* The "Write OUTput" generators.  These generally perform some non-trivial
5198    copy of data to TCG globals, or to main memory.  The trivial cases are
5199    generally handled by having a "prep" generator install the TCG global
5200    as the destination of the operation.  */
5201 
5202 static void wout_r1(DisasContext *s, DisasOps *o)
5203 {
5204     store_reg(get_field(s, r1), o->out);
5205 }
5206 #define SPEC_wout_r1 0
5207 
5208 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5209 {
5210     store_reg(get_field(s, r1), o->out2);
5211 }
5212 #define SPEC_wout_out2_r1 0
5213 
5214 static void wout_r1_8(DisasContext *s, DisasOps *o)
5215 {
5216     int r1 = get_field(s, r1);
5217     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5218 }
5219 #define SPEC_wout_r1_8 0
5220 
5221 static void wout_r1_16(DisasContext *s, DisasOps *o)
5222 {
5223     int r1 = get_field(s, r1);
5224     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5225 }
5226 #define SPEC_wout_r1_16 0
5227 
5228 static void wout_r1_32(DisasContext *s, DisasOps *o)
5229 {
5230     store_reg32_i64(get_field(s, r1), o->out);
5231 }
5232 #define SPEC_wout_r1_32 0
5233 
5234 static void wout_r1_32h(DisasContext *s, DisasOps *o)
5235 {
5236     store_reg32h_i64(get_field(s, r1), o->out);
5237 }
5238 #define SPEC_wout_r1_32h 0
5239 
5240 static void wout_r1_P32(DisasContext *s, DisasOps *o)
5241 {
5242     int r1 = get_field(s, r1);
5243     store_reg32_i64(r1, o->out);
5244     store_reg32_i64(r1 + 1, o->out2);
5245 }
5246 #define SPEC_wout_r1_P32 SPEC_r1_even
5247 
5248 static void wout_r1_D32(DisasContext *s, DisasOps *o)
5249 {
5250     int r1 = get_field(s, r1);
5251     TCGv_i64 t = tcg_temp_new_i64();
5252     store_reg32_i64(r1 + 1, o->out);
5253     tcg_gen_shri_i64(t, o->out, 32);
5254     store_reg32_i64(r1, t);
5255 }
5256 #define SPEC_wout_r1_D32 SPEC_r1_even
5257 
5258 static void wout_r1_D64(DisasContext *s, DisasOps *o)
5259 {
5260     int r1 = get_field(s, r1);
5261     tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
5262 }
5263 #define SPEC_wout_r1_D64 SPEC_r1_even
5264 
5265 static void wout_r3_P32(DisasContext *s, DisasOps *o)
5266 {
5267     int r3 = get_field(s, r3);
5268     store_reg32_i64(r3, o->out);
5269     store_reg32_i64(r3 + 1, o->out2);
5270 }
5271 #define SPEC_wout_r3_P32 SPEC_r3_even
5272 
5273 static void wout_r3_P64(DisasContext *s, DisasOps *o)
5274 {
5275     int r3 = get_field(s, r3);
5276     store_reg(r3, o->out);
5277     store_reg(r3 + 1, o->out2);
5278 }
5279 #define SPEC_wout_r3_P64 SPEC_r3_even
5280 
5281 static void wout_e1(DisasContext *s, DisasOps *o)
5282 {
5283     store_freg32_i64(get_field(s, r1), o->out);
5284 }
5285 #define SPEC_wout_e1 0
5286 
5287 static void wout_f1(DisasContext *s, DisasOps *o)
5288 {
5289     store_freg(get_field(s, r1), o->out);
5290 }
5291 #define SPEC_wout_f1 0
5292 
5293 static void wout_x1(DisasContext *s, DisasOps *o)
5294 {
5295     int f1 = get_field(s, r1);
5296 
5297     /* Split out_128 into out+out2 for cout_f128. */
5298     tcg_debug_assert(o->out == NULL);
5299     o->out = tcg_temp_new_i64();
5300     o->out2 = tcg_temp_new_i64();
5301 
5302     tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
5303     store_freg(f1, o->out);
5304     store_freg(f1 + 2, o->out2);
5305 }
5306 #define SPEC_wout_x1 SPEC_r1_f128
5307 
5308 static void wout_x1_P(DisasContext *s, DisasOps *o)
5309 {
5310     int f1 = get_field(s, r1);
5311     store_freg(f1, o->out);
5312     store_freg(f1 + 2, o->out2);
5313 }
5314 #define SPEC_wout_x1_P SPEC_r1_f128
5315 
5316 static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
5317 {
5318     if (get_field(s, r1) != get_field(s, r2)) {
5319         store_reg32_i64(get_field(s, r1), o->out);
5320     }
5321 }
5322 #define SPEC_wout_cond_r1r2_32 0
5323 
5324 static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
5325 {
5326     if (get_field(s, r1) != get_field(s, r2)) {
5327         store_freg32_i64(get_field(s, r1), o->out);
5328     }
5329 }
5330 #define SPEC_wout_cond_e1e2 0
5331 
5332 static void wout_m1_8(DisasContext *s, DisasOps *o)
5333 {
5334     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
5335 }
5336 #define SPEC_wout_m1_8 0
5337 
5338 static void wout_m1_16(DisasContext *s, DisasOps *o)
5339 {
5340     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
5341 }
5342 #define SPEC_wout_m1_16 0
5343 
5344 #ifndef CONFIG_USER_ONLY
5345 static void wout_m1_16a(DisasContext *s, DisasOps *o)
5346 {
5347     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
5348 }
5349 #define SPEC_wout_m1_16a 0
5350 #endif
5351 
5352 static void wout_m1_32(DisasContext *s, DisasOps *o)
5353 {
5354     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
5355 }
5356 #define SPEC_wout_m1_32 0
5357 
5358 #ifndef CONFIG_USER_ONLY
5359 static void wout_m1_32a(DisasContext *s, DisasOps *o)
5360 {
5361     tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
5362 }
5363 #define SPEC_wout_m1_32a 0
5364 #endif
5365 
5366 static void wout_m1_64(DisasContext *s, DisasOps *o)
5367 {
5368     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
5369 }
5370 #define SPEC_wout_m1_64 0
5371 
5372 #ifndef CONFIG_USER_ONLY
5373 static void wout_m1_64a(DisasContext *s, DisasOps *o)
5374 {
5375     tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5376 }
5377 #define SPEC_wout_m1_64a 0
5378 #endif
5379 
5380 static void wout_m2_32(DisasContext *s, DisasOps *o)
5381 {
5382     tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
5383 }
5384 #define SPEC_wout_m2_32 0
5385 
5386 static void wout_in2_r1(DisasContext *s, DisasOps *o)
5387 {
5388     store_reg(get_field(s, r1), o->in2);
5389 }
5390 #define SPEC_wout_in2_r1 0
5391 
5392 static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
5393 {
5394     store_reg32_i64(get_field(s, r1), o->in2);
5395 }
5396 #define SPEC_wout_in2_r1_32 0
5397 
5398 /* ====================================================================== */
5399 /* The "INput 1" generators.  These load the first operand to an insn.  */
5400 
5401 static void in1_r1(DisasContext *s, DisasOps *o)
5402 {
5403     o->in1 = load_reg(get_field(s, r1));
5404 }
5405 #define SPEC_in1_r1 0
5406 
5407 static void in1_r1_o(DisasContext *s, DisasOps *o)
5408 {
5409     o->in1 = regs[get_field(s, r1)];
5410 }
5411 #define SPEC_in1_r1_o 0
5412 
5413 static void in1_r1_32s(DisasContext *s, DisasOps *o)
5414 {
5415     o->in1 = tcg_temp_new_i64();
5416     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
5417 }
5418 #define SPEC_in1_r1_32s 0
5419 
5420 static void in1_r1_32u(DisasContext *s, DisasOps *o)
5421 {
5422     o->in1 = tcg_temp_new_i64();
5423     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
5424 }
5425 #define SPEC_in1_r1_32u 0
5426 
5427 static void in1_r1_sr32(DisasContext *s, DisasOps *o)
5428 {
5429     o->in1 = tcg_temp_new_i64();
5430     tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
5431 }
5432 #define SPEC_in1_r1_sr32 0
5433 
5434 static void in1_r1p1(DisasContext *s, DisasOps *o)
5435 {
5436     o->in1 = load_reg(get_field(s, r1) + 1);
5437 }
5438 #define SPEC_in1_r1p1 SPEC_r1_even
5439 
5440 static void in1_r1p1_o(DisasContext *s, DisasOps *o)
5441 {
5442     o->in1 = regs[get_field(s, r1) + 1];
5443 }
5444 #define SPEC_in1_r1p1_o SPEC_r1_even
5445 
5446 static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
5447 {
5448     o->in1 = tcg_temp_new_i64();
5449     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
5450 }
5451 #define SPEC_in1_r1p1_32s SPEC_r1_even
5452 
5453 static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
5454 {
5455     o->in1 = tcg_temp_new_i64();
5456     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
5457 }
5458 #define SPEC_in1_r1p1_32u SPEC_r1_even
5459 
5460 static void in1_r1_D32(DisasContext *s, DisasOps *o)
5461 {
5462     int r1 = get_field(s, r1);
5463     o->in1 = tcg_temp_new_i64();
5464     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5465 }
5466 #define SPEC_in1_r1_D32 SPEC_r1_even
5467 
5468 static void in1_r2(DisasContext *s, DisasOps *o)
5469 {
5470     o->in1 = load_reg(get_field(s, r2));
5471 }
5472 #define SPEC_in1_r2 0
5473 
5474 static void in1_r2_sr32(DisasContext *s, DisasOps *o)
5475 {
5476     o->in1 = tcg_temp_new_i64();
5477     tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
5478 }
5479 #define SPEC_in1_r2_sr32 0
5480 
5481 static void in1_r2_32u(DisasContext *s, DisasOps *o)
5482 {
5483     o->in1 = tcg_temp_new_i64();
5484     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
5485 }
5486 #define SPEC_in1_r2_32u 0
5487 
5488 static void in1_r3(DisasContext *s, DisasOps *o)
5489 {
5490     o->in1 = load_reg(get_field(s, r3));
5491 }
5492 #define SPEC_in1_r3 0
5493 
5494 static void in1_r3_o(DisasContext *s, DisasOps *o)
5495 {
5496     o->in1 = regs[get_field(s, r3)];
5497 }
5498 #define SPEC_in1_r3_o 0
5499 
5500 static void in1_r3_32s(DisasContext *s, DisasOps *o)
5501 {
5502     o->in1 = tcg_temp_new_i64();
5503     tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
5504 }
5505 #define SPEC_in1_r3_32s 0
5506 
5507 static void in1_r3_32u(DisasContext *s, DisasOps *o)
5508 {
5509     o->in1 = tcg_temp_new_i64();
5510     tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
5511 }
5512 #define SPEC_in1_r3_32u 0
5513 
5514 static void in1_r3_D32(DisasContext *s, DisasOps *o)
5515 {
5516     int r3 = get_field(s, r3);
5517     o->in1 = tcg_temp_new_i64();
5518     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5519 }
5520 #define SPEC_in1_r3_D32 SPEC_r3_even
5521 
5522 static void in1_r3_sr32(DisasContext *s, DisasOps *o)
5523 {
5524     o->in1 = tcg_temp_new_i64();
5525     tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
5526 }
5527 #define SPEC_in1_r3_sr32 0
5528 
5529 static void in1_e1(DisasContext *s, DisasOps *o)
5530 {
5531     o->in1 = load_freg32_i64(get_field(s, r1));
5532 }
5533 #define SPEC_in1_e1 0
5534 
5535 static void in1_f1(DisasContext *s, DisasOps *o)
5536 {
5537     o->in1 = load_freg(get_field(s, r1));
5538 }
5539 #define SPEC_in1_f1 0
5540 
5541 static void in1_x1(DisasContext *s, DisasOps *o)
5542 {
5543     o->in1_128 = load_freg_128(get_field(s, r1));
5544 }
5545 #define SPEC_in1_x1 SPEC_r1_f128
5546 
5547 /* Load the high double word of an extended (128-bit) format FP number */
5548 static void in1_x2h(DisasContext *s, DisasOps *o)
5549 {
5550     o->in1 = load_freg(get_field(s, r2));
5551 }
5552 #define SPEC_in1_x2h SPEC_r2_f128
5553 
5554 static void in1_f3(DisasContext *s, DisasOps *o)
5555 {
5556     o->in1 = load_freg(get_field(s, r3));
5557 }
5558 #define SPEC_in1_f3 0
5559 
5560 static void in1_la1(DisasContext *s, DisasOps *o)
5561 {
5562     o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
5563 }
5564 #define SPEC_in1_la1 0
5565 
5566 static void in1_la2(DisasContext *s, DisasOps *o)
5567 {
5568     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5569     o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5570 }
5571 #define SPEC_in1_la2 0
5572 
5573 static void in1_m1_8u(DisasContext *s, DisasOps *o)
5574 {
5575     in1_la1(s, o);
5576     o->in1 = tcg_temp_new_i64();
5577     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
5578 }
5579 #define SPEC_in1_m1_8u 0
5580 
5581 static void in1_m1_16s(DisasContext *s, DisasOps *o)
5582 {
5583     in1_la1(s, o);
5584     o->in1 = tcg_temp_new_i64();
5585     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
5586 }
5587 #define SPEC_in1_m1_16s 0
5588 
5589 static void in1_m1_16u(DisasContext *s, DisasOps *o)
5590 {
5591     in1_la1(s, o);
5592     o->in1 = tcg_temp_new_i64();
5593     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
5594 }
5595 #define SPEC_in1_m1_16u 0
5596 
5597 static void in1_m1_32s(DisasContext *s, DisasOps *o)
5598 {
5599     in1_la1(s, o);
5600     o->in1 = tcg_temp_new_i64();
5601     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
5602 }
5603 #define SPEC_in1_m1_32s 0
5604 
5605 static void in1_m1_32u(DisasContext *s, DisasOps *o)
5606 {
5607     in1_la1(s, o);
5608     o->in1 = tcg_temp_new_i64();
5609     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
5610 }
5611 #define SPEC_in1_m1_32u 0
5612 
5613 static void in1_m1_64(DisasContext *s, DisasOps *o)
5614 {
5615     in1_la1(s, o);
5616     o->in1 = tcg_temp_new_i64();
5617     tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
5618 }
5619 #define SPEC_in1_m1_64 0
5620 
5621 /* ====================================================================== */
5622 /* The "INput 2" generators.  These load the second operand to an insn.  */
5623 
5624 static void in2_r1_o(DisasContext *s, DisasOps *o)
5625 {
5626     o->in2 = regs[get_field(s, r1)];
5627 }
5628 #define SPEC_in2_r1_o 0
5629 
5630 static void in2_r1_16u(DisasContext *s, DisasOps *o)
5631 {
5632     o->in2 = tcg_temp_new_i64();
5633     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
5634 }
5635 #define SPEC_in2_r1_16u 0
5636 
5637 static void in2_r1_32u(DisasContext *s, DisasOps *o)
5638 {
5639     o->in2 = tcg_temp_new_i64();
5640     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
5641 }
5642 #define SPEC_in2_r1_32u 0
5643 
5644 static void in2_r1_D32(DisasContext *s, DisasOps *o)
5645 {
5646     int r1 = get_field(s, r1);
5647     o->in2 = tcg_temp_new_i64();
5648     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5649 }
5650 #define SPEC_in2_r1_D32 SPEC_r1_even
5651 
5652 static void in2_r2(DisasContext *s, DisasOps *o)
5653 {
5654     o->in2 = load_reg(get_field(s, r2));
5655 }
5656 #define SPEC_in2_r2 0
5657 
5658 static void in2_r2_o(DisasContext *s, DisasOps *o)
5659 {
5660     o->in2 = regs[get_field(s, r2)];
5661 }
5662 #define SPEC_in2_r2_o 0
5663 
5664 static void in2_r2_nz(DisasContext *s, DisasOps *o)
5665 {
5666     int r2 = get_field(s, r2);
5667     if (r2 != 0) {
5668         o->in2 = load_reg(r2);
5669     }
5670 }
5671 #define SPEC_in2_r2_nz 0
5672 
5673 static void in2_r2_8s(DisasContext *s, DisasOps *o)
5674 {
5675     o->in2 = tcg_temp_new_i64();
5676     tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
5677 }
5678 #define SPEC_in2_r2_8s 0
5679 
5680 static void in2_r2_8u(DisasContext *s, DisasOps *o)
5681 {
5682     o->in2 = tcg_temp_new_i64();
5683     tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
5684 }
5685 #define SPEC_in2_r2_8u 0
5686 
5687 static void in2_r2_16s(DisasContext *s, DisasOps *o)
5688 {
5689     o->in2 = tcg_temp_new_i64();
5690     tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
5691 }
5692 #define SPEC_in2_r2_16s 0
5693 
5694 static void in2_r2_16u(DisasContext *s, DisasOps *o)
5695 {
5696     o->in2 = tcg_temp_new_i64();
5697     tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
5698 }
5699 #define SPEC_in2_r2_16u 0
5700 
5701 static void in2_r3(DisasContext *s, DisasOps *o)
5702 {
5703     o->in2 = load_reg(get_field(s, r3));
5704 }
5705 #define SPEC_in2_r3 0
5706 
5707 static void in2_r3_D64(DisasContext *s, DisasOps *o)
5708 {
5709     int r3 = get_field(s, r3);
5710     o->in2_128 = tcg_temp_new_i128();
5711     tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
5712 }
5713 #define SPEC_in2_r3_D64 SPEC_r3_even
5714 
5715 static void in2_r3_sr32(DisasContext *s, DisasOps *o)
5716 {
5717     o->in2 = tcg_temp_new_i64();
5718     tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
5719 }
5720 #define SPEC_in2_r3_sr32 0
5721 
5722 static void in2_r3_32u(DisasContext *s, DisasOps *o)
5723 {
5724     o->in2 = tcg_temp_new_i64();
5725     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
5726 }
5727 #define SPEC_in2_r3_32u 0
5728 
5729 static void in2_r2_32s(DisasContext *s, DisasOps *o)
5730 {
5731     o->in2 = tcg_temp_new_i64();
5732     tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
5733 }
5734 #define SPEC_in2_r2_32s 0
5735 
5736 static void in2_r2_32u(DisasContext *s, DisasOps *o)
5737 {
5738     o->in2 = tcg_temp_new_i64();
5739     tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
5740 }
5741 #define SPEC_in2_r2_32u 0
5742 
5743 static void in2_r2_sr32(DisasContext *s, DisasOps *o)
5744 {
5745     o->in2 = tcg_temp_new_i64();
5746     tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
5747 }
5748 #define SPEC_in2_r2_sr32 0
5749 
5750 static void in2_e2(DisasContext *s, DisasOps *o)
5751 {
5752     o->in2 = load_freg32_i64(get_field(s, r2));
5753 }
5754 #define SPEC_in2_e2 0
5755 
5756 static void in2_f2(DisasContext *s, DisasOps *o)
5757 {
5758     o->in2 = load_freg(get_field(s, r2));
5759 }
5760 #define SPEC_in2_f2 0
5761 
5762 static void in2_x2(DisasContext *s, DisasOps *o)
5763 {
5764     o->in2_128 = load_freg_128(get_field(s, r2));
5765 }
5766 #define SPEC_in2_x2 SPEC_r2_f128
5767 
5768 /* Load the low double word of an extended (128-bit) format FP number */
5769 static void in2_x2l(DisasContext *s, DisasOps *o)
5770 {
5771     o->in2 = load_freg(get_field(s, r2) + 2);
5772 }
5773 #define SPEC_in2_x2l SPEC_r2_f128
5774 
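/* Use the r2 register value as an address, wrapped to the current
   addressing mode (24/31/64-bit). */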
5775 static void in2_ra2(DisasContext *s, DisasOps *o)
5776 {
5777     int r2 = get_field(s, r2);
5778 
5779     /* Note: *don't* treat !r2 as 0, use the reg value. */
5780     o->in2 = tcg_temp_new_i64();
5781     gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
5782 }
5783 #define SPEC_in2_ra2 0
5784 
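/* Compute the b2/x2/d2 effective address; formats without an index
   field simply contribute x2 = 0. */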
5785 static void in2_a2(DisasContext *s, DisasOps *o)
5786 {
5787     int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
5788     o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
5789 }
5790 #define SPEC_in2_a2 0
5791 
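/* Compute the target of a relative-immediate operand.  The immediate
   counts halfwords, hence the "* 2"; the widening cast avoids signed
   overflow for 32-bit immediates. */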
5792 static TCGv gen_ri2(DisasContext *s)
5793 {
5794     TCGv ri2 = NULL;
5795     bool is_imm;
5796     int imm;
5797 
5798     disas_jdest(s, i2, is_imm, imm, ri2);
5799     if (is_imm) {
5800         ri2 = tcg_constant_i64(s->base.pc_next + (int64_t)imm * 2);
5801     }
5802 
5803     return ri2;
5804 }
5805 
5806 static void in2_ri2(DisasContext *s, DisasOps *o)
5807 {
5808     o->in2 = gen_ri2(s);
5809 }
5810 #define SPEC_in2_ri2 0
5811 
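/* Compute a shift count from b2/d2.  Only the low 6 bits of the
   effective address are used, so shift counts are modulo 64. */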
5812 static void in2_sh(DisasContext *s, DisasOps *o)
5813 {
5814     int b2 = get_field(s, b2);
5815     int d2 = get_field(s, d2);
5816 
5817     if (b2 == 0) {
5818         o->in2 = tcg_constant_i64(d2 & 0x3f);
5819     } else {
5820         o->in2 = get_address(s, 0, b2, d2);
5821         tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
5822     }
5823 }
5824 #define SPEC_in2_sh 0
5825 
5826 static void in2_m2_8u(DisasContext *s, DisasOps *o)
5827 {
5828     in2_a2(s, o);
5829     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
5830 }
5831 #define SPEC_in2_m2_8u 0
5832 
5833 static void in2_m2_16s(DisasContext *s, DisasOps *o)
5834 {
5835     in2_a2(s, o);
5836     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
5837 }
5838 #define SPEC_in2_m2_16s 0
5839 
5840 static void in2_m2_16u(DisasContext *s, DisasOps *o)
5841 {
5842     in2_a2(s, o);
5843     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
5844 }
5845 #define SPEC_in2_m2_16u 0
5846 
5847 static void in2_m2_32s(DisasContext *s, DisasOps *o)
5848 {
5849     in2_a2(s, o);
5850     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
5851 }
5852 #define SPEC_in2_m2_32s 0
5853 
5854 static void in2_m2_32u(DisasContext *s, DisasOps *o)
5855 {
5856     in2_a2(s, o);
5857     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
5858 }
5859 #define SPEC_in2_m2_32u 0
5860 
5861 #ifndef CONFIG_USER_ONLY
5862 static void in2_m2_32ua(DisasContext *s, DisasOps *o)
5863 {
5864     in2_a2(s, o);
5865     tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
5866 }
5867 #define SPEC_in2_m2_32ua 0
5868 #endif
5869 
5870 static void in2_m2_64(DisasContext *s, DisasOps *o)
5871 {
5872     in2_a2(s, o);
5873     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5874 }
5875 #define SPEC_in2_m2_64 0
5876 
5877 static void in2_m2_64w(DisasContext *s, DisasOps *o)
5878 {
5879     in2_a2(s, o);
5880     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
5881     gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
5882 }
5883 #define SPEC_in2_m2_64w 0
5884 
5885 #ifndef CONFIG_USER_ONLY
5886 static void in2_m2_64a(DisasContext *s, DisasOps *o)
5887 {
5888     in2_a2(s, o);
5889     tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
5890 }
5891 #define SPEC_in2_m2_64a 0
5892 #endif
5893 
5894 static void in2_mri2_16s(DisasContext *s, DisasOps *o)
5895 {
5896     o->in2 = tcg_temp_new_i64();
5897     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
5898 }
5899 #define SPEC_in2_mri2_16s 0
5900 
5901 static void in2_mri2_16u(DisasContext *s, DisasOps *o)
5902 {
5903     o->in2 = tcg_temp_new_i64();
5904     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
5905 }
5906 #define SPEC_in2_mri2_16u 0
5907 
5908 static void in2_mri2_32s(DisasContext *s, DisasOps *o)
5909 {
5910     o->in2 = tcg_temp_new_i64();
5911     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5912                        MO_TESL | MO_ALIGN);
5913 }
5914 #define SPEC_in2_mri2_32s 0
5915 
5916 static void in2_mri2_32u(DisasContext *s, DisasOps *o)
5917 {
5918     o->in2 = tcg_temp_new_i64();
5919     tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
5920                        MO_TEUL | MO_ALIGN);
5921 }
5922 #define SPEC_in2_mri2_32u 0
5923 
5924 static void in2_mri2_64(DisasContext *s, DisasOps *o)
5925 {
5926     o->in2 = tcg_temp_new_i64();
5927     tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
5928                         MO_TEUQ | MO_ALIGN);
5929 }
5930 #define SPEC_in2_mri2_64 0
5931 
5932 static void in2_i2(DisasContext *s, DisasOps *o)
5933 {
5934     o->in2 = tcg_constant_i64(get_field(s, i2));
5935 }
5936 #define SPEC_in2_i2 0
5937 
5938 static void in2_i2_8u(DisasContext *s, DisasOps *o)
5939 {
5940     o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
5941 }
5942 #define SPEC_in2_i2_8u 0
5943 
5944 static void in2_i2_16u(DisasContext *s, DisasOps *o)
5945 {
5946     o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
5947 }
5948 #define SPEC_in2_i2_16u 0
5949 
5950 static void in2_i2_32u(DisasContext *s, DisasOps *o)
5951 {
5952     o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
5953 }
5954 #define SPEC_in2_i2_32u 0
5955 
5956 static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
5957 {
5958     uint64_t i2 = (uint16_t)get_field(s, i2);
5959     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5960 }
5961 #define SPEC_in2_i2_16u_shl 0
5962 
5963 static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
5964 {
5965     uint64_t i2 = (uint32_t)get_field(s, i2);
5966     o->in2 = tcg_constant_i64(i2 << s->insn->data);
5967 }
5968 #define SPEC_in2_i2_32u_shl 0
5969 
5970 #ifndef CONFIG_USER_ONLY
5971 static void in2_insn(DisasContext *s, DisasOps *o)
5972 {
5973     o->in2 = tcg_constant_i64(s->fields.raw_insn);
5974 }
5975 #define SPEC_in2_insn 0
5976 #endif
5977 
5978 /* ====================================================================== */
5979 
5980 /* Find opc within the table of insns.  This is formulated as a switch
5981    statement so that (1) we get compile-time notice of cut-and-paste errors
5982    for duplicated opcodes, and (2) the compiler generates the binary
5983    search tree, rather than us having to post-process the table.  */
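/*
 * Illustration: each table line is written with the E macro (or with
 * C/D/F, which forward to E).  The first expansion of E below turns an
 * entry E(OPC, NM, ...) into the enumerator insn_NM; the second builds
 * the DisasInsn initializer for insn_info[]; the third produces
 * "case OPC: return &insn_info[insn_NM];" inside lookup_opc().
 */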
5984 
5985 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5986     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)
5987 
5988 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5989     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)
5990 
5991 #define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
5992     E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)
5993 
5994 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,
5995 
5996 enum DisasInsnEnum {
5997 #include "insn-data.h.inc"
5998 };
5999 
6000 #undef E
6001 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
6002     .opc = OPC,                                                             \
6003     .flags = FL,                                                            \
6004     .fmt = FMT_##FT,                                                        \
6005     .fac = FAC_##FC,                                                        \
6006     .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
6007     .name = #NM,                                                            \
6008     .help_in1 = in1_##I1,                                                   \
6009     .help_in2 = in2_##I2,                                                   \
6010     .help_prep = prep_##P,                                                  \
6011     .help_wout = wout_##W,                                                  \
6012     .help_cout = cout_##CC,                                                 \
6013     .help_op = op_##OP,                                                     \
6014     .data = D                                                               \
6015  },
6016 
6017 /* Allow 0 to be used for NULL in the table below.  */
6018 #define in1_0  NULL
6019 #define in2_0  NULL
6020 #define prep_0  NULL
6021 #define wout_0  NULL
6022 #define cout_0  NULL
6023 #define op_0  NULL
6024 
6025 #define SPEC_in1_0 0
6026 #define SPEC_in2_0 0
6027 #define SPEC_prep_0 0
6028 #define SPEC_wout_0 0
6029 
6030 /* Give smaller names to the various facilities.  */
6031 #define FAC_Z           S390_FEAT_ZARCH
6032 #define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
6033 #define FAC_DFP         S390_FEAT_DFP
6034 #define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
6035 #define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
6036 #define FAC_EE          S390_FEAT_EXECUTE_EXT
6037 #define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
6038 #define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
6039 #define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
6040 #define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
6041 #define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
6042 #define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
6043 #define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
6044 #define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
6045 #define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
6046 #define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
6047 #define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
6048 #define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
6049 #define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
6050 #define FAC_PC          S390_FEAT_STFLE_45 /* population count */
6051 #define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
6052 #define FAC_SFLE        S390_FEAT_STFLE
6053 #define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
6054 #define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
6055 #define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
6056 #define FAC_DAT_ENH     S390_FEAT_DAT_ENH
6057 #define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
6058 #define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
6059 #define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
6060 #define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
6061 #define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
6062 #define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
6063 #define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
6064 #define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
6065 #define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
6066 #define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
6067 #define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
6068 #define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
6069 #define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
6070 #define FAC_V           S390_FEAT_VECTOR /* vector facility */
6071 #define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
6072 #define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
6073 #define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
6074 #define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */
6075 
6076 static const DisasInsn insn_info[] = {
6077 #include "insn-data.h.inc"
6078 };
6079 
6080 #undef E
6081 #define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
6082     case OPC: return &insn_info[insn_ ## NM];
6083 
6084 static const DisasInsn *lookup_opc(uint16_t opc)
6085 {
6086     switch (opc) {
6087 #include "insn-data.h.inc"
6088     default:
6089         return NULL;
6090     }
6091 }
6092 
6093 #undef F
6094 #undef E
6095 #undef D
6096 #undef C
6097 
6098 /* Extract a field from the insn.  The INSN should be left-aligned in
6099    the uint64_t so that we can more easily utilize the big-bit-endian
6100    definitions we extract from the Principles of Operation.  */
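/*
 * E.g. (illustrative): a 4-bit field at instruction bit 8, such as r1
 * in the RR format, is extracted as (insn << 8) >> (64 - 4).
 */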
6101 
6102 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
6103 {
6104     uint32_t r, m;
6105 
6106     if (f->size == 0) {
6107         return;
6108     }
6109 
6110     /* Zero extract the field from the insn.  */
6111     r = (insn << f->beg) >> (64 - f->size);
6112 
6113     /* Sign-extend, or un-swap the field as necessary.  */
6114     switch (f->type) {
6115     case 0: /* unsigned */
6116         break;
6117     case 1: /* signed */
6118         assert(f->size <= 32);
6119         m = 1u << (f->size - 1);
6120         r = (r ^ m) - m;
6121         break;
6122     case 2: /* dl+dh split, signed 20 bit. */
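        /*
         * The extracted bits hold DL (12 bits) above DH (8 bits);
         * sign-extend DH into the top and move DL down to reassemble
         * the signed 20-bit displacement DH:DL.
         */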
6123         r = ((int8_t)r << 12) | (r >> 8);
6124         break;
6125     case 3: /* MSB stored in RXB */
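        /*
         * Vector insns store the fifth (most significant) bit of each
         * register number in the RXB field at instruction bits 36-39,
         * one bit per operand position; pick the RXB bit matching this
         * field's location.
         */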
6126         g_assert(f->size == 4);
6127         switch (f->beg) {
6128         case 8:
6129             r |= extract64(insn, 63 - 36, 1) << 4;
6130             break;
6131         case 12:
6132             r |= extract64(insn, 63 - 37, 1) << 4;
6133             break;
6134         case 16:
6135             r |= extract64(insn, 63 - 38, 1) << 4;
6136             break;
6137         case 32:
6138             r |= extract64(insn, 63 - 39, 1) << 4;
6139             break;
6140         default:
6141             g_assert_not_reached();
6142         }
6143         break;
6144     default:
6145         abort();
6146     }
6147 
6148     /*
6149      * Validate the "compressed" encoding we selected above, i.e. check
6150      * that we haven't made two different original fields overlap.
6151      */
6152     assert(((o->presentC >> f->indexC) & 1) == 0);
6153     o->presentC |= 1 << f->indexC;
6154     o->presentO |= 1 << f->indexO;
6155 
6156     o->c[f->indexC] = r;
6157 }
6158 
6159 /* Look up the insn at the current PC, extracting the operands into O and
6160    returning the info struct for the insn.  Returns NULL for invalid insn.  */
6161 
6162 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
6163 {
6164     uint64_t insn, pc = s->base.pc_next;
6165     int op, op2, ilen;
6166     const DisasInsn *info;
6167 
6168     if (unlikely(s->ex_value)) {
6169         /* Drop the EX data now, so that it's clear on exception paths.  */
6170         tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
6171                        offsetof(CPUS390XState, ex_value));
6172 
6173         /* Extract the values saved by EXECUTE.  */
6174         insn = s->ex_value & 0xffffffffffff0000ull;
6175         ilen = s->ex_value & 0xf;
6176 
6177         /* Register insn bytes with translator so plugins work. */
6178         for (int i = 0; i < ilen; i++) {
6179             uint8_t byte = extract64(insn, 56 - (i * 8), 8);
6180             translator_fake_ldb(byte, pc + i);
6181         }
6182         op = insn >> 56;
6183     } else {
6184         insn = ld_code2(env, s, pc);
6185         op = (insn >> 8) & 0xff;
6186         ilen = get_ilen(op);
6187         switch (ilen) {
6188         case 2:
6189             insn = insn << 48;
6190             break;
6191         case 4:
6192             insn = ld_code4(env, s, pc) << 32;
6193             break;
6194         case 6:
6195             insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
6196             break;
6197         default:
6198             g_assert_not_reached();
6199         }
6200     }
6201     s->pc_tmp = s->base.pc_next + ilen;
6202     s->ilen = ilen;
6203 
6204     /* We can't actually determine the insn format until we've looked up
6205        the full insn opcode, which we can't do without locating the
6206        secondary opcode.  Assume by default that OP2 is at bit 40; for
6207        those smaller insns that don't actually have a secondary opcode
6208        this will correctly result in OP2 = 0. */
6209     switch (op) {
6210     case 0x01: /* E */
6211     case 0x80: /* S */
6212     case 0x82: /* S */
6213     case 0x93: /* S */
6214     case 0xb2: /* S, RRF, RRE, IE */
6215     case 0xb3: /* RRE, RRD, RRF */
6216     case 0xb9: /* RRE, RRF */
6217     case 0xe5: /* SSE, SIL */
6218         op2 = (insn << 8) >> 56;
6219         break;
6220     case 0xa5: /* RI */
6221     case 0xa7: /* RI */
6222     case 0xc0: /* RIL */
6223     case 0xc2: /* RIL */
6224     case 0xc4: /* RIL */
6225     case 0xc6: /* RIL */
6226     case 0xc8: /* SSF */
6227     case 0xcc: /* RIL */
6228         op2 = (insn << 12) >> 60;
6229         break;
6230     case 0xc5: /* MII */
6231     case 0xc7: /* SMI */
6232     case 0xd0 ... 0xdf: /* SS */
6233     case 0xe1: /* SS */
6234     case 0xe2: /* SS */
6235     case 0xe8: /* SS */
6236     case 0xe9: /* SS */
6237     case 0xea: /* SS */
6238     case 0xee ... 0xf3: /* SS */
6239     case 0xf8 ... 0xfd: /* SS */
6240         op2 = 0;
6241         break;
6242     default:
6243         op2 = (insn << 40) >> 56;
6244         break;
6245     }
6246 
6247     memset(&s->fields, 0, sizeof(s->fields));
6248     s->fields.raw_insn = insn;
6249     s->fields.op = op;
6250     s->fields.op2 = op2;
6251 
6252     /* Lookup the instruction.  */
6253     info = lookup_opc(op << 8 | op2);
6254     s->insn = info;
6255 
6256     /* If we found it, extract the operands.  */
6257     if (info != NULL) {
6258         DisasFormat fmt = info->fmt;
6259         int i;
6260 
6261         for (i = 0; i < NUM_C_FIELD; ++i) {
6262             extract_field(&s->fields, &format_info[fmt].op[i], insn);
6263         }
6264     }
6265     return info;
6266 }
6267 
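/* Registers other than 0, 2, 4 and 6 are additional (AFP) registers. */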
6268 static bool is_afp_reg(int reg)
6269 {
6270     return reg % 2 || reg > 6;
6271 }
6272 
6273 static bool is_fp_pair(int reg)
6274 {
6275     /* Valid pairs are 0,1,4,5,8,9,12,13; to exclude the others, check that bit 1 is clear. */
6276     return !(reg & 0x2);
6277 }
6278 
6279 static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
6280 {
6281     const DisasInsn *insn;
6282     DisasJumpType ret = DISAS_NEXT;
6283     DisasOps o = {};
6284     bool icount = false;
6285 
6286     /* Search for the insn in the table.  */
6287     insn = extract_insn(env, s);
6288 
6289     /* Update insn_start now that we know the ILEN.  */
6290     tcg_set_insn_start_param(s->insn_start, 2, s->ilen);
6291 
6292     /* Not found means unimplemented/illegal opcode.  */
6293     if (insn == NULL) {
6294         qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
6295                       s->fields.op, s->fields.op2);
6296         gen_illegal_opcode(s);
6297         ret = DISAS_NORETURN;
6298         goto out;
6299     }
6300 
6301 #ifndef CONFIG_USER_ONLY
6302     if (s->base.tb->flags & FLAG_MASK_PER) {
6303         TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
6304         gen_helper_per_ifetch(cpu_env, addr);
6305     }
6306 #endif
6307 
6308     /* process flags */
6309     if (insn->flags) {
6310         /* privileged instruction */
6311         if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
6312             gen_program_exception(s, PGM_PRIVILEGED);
6313             ret = DISAS_NORETURN;
6314             goto out;
6315         }
6316 
6317         /* if AFP is not enabled, instructions and registers are forbidden */
6318         if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
6319             uint8_t dxc = 0;
6320 
6321             if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
6322                 dxc = 1;
6323             }
6324             if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
6325                 dxc = 1;
6326             }
6327             if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
6328                 dxc = 1;
6329             }
6330             if (insn->flags & IF_BFP) {
6331                 dxc = 2;
6332             }
6333             if (insn->flags & IF_DFP) {
6334                 dxc = 3;
6335             }
6336             if (insn->flags & IF_VEC) {
6337                 dxc = 0xfe;
6338             }
6339             if (dxc) {
6340                 gen_data_exception(dxc);
6341                 ret = DISAS_NORETURN;
6342                 goto out;
6343             }
6344         }
6345 
6346         /* if vector instructions not enabled, executing them is forbidden */
6347         if (insn->flags & IF_VEC) {
6348             if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
6349                 gen_data_exception(0xfe);
6350                 ret = DISAS_NORETURN;
6351                 goto out;
6352             }
6353         }
6354 
6355         /* input/output insns need special handling in icount mode */
6356         if (unlikely(insn->flags & IF_IO)) {
6357             icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
6358             if (icount) {
6359                 gen_io_start();
6360             }
6361         }
6362     }
6363 
6364     /* Check for insn specification exceptions.  */
6365     if (insn->spec) {
6366         if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
6367             (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
6368             (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
6369             (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
6370             (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
6371             gen_program_exception(s, PGM_SPECIFICATION);
6372             ret = DISAS_NORETURN;
6373             goto out;
6374         }
6375     }
6376 
6377     /* Implement the instruction.  */
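    /*
     * Each helper is optional: in1/in2 load the operands, prep sets up
     * the outputs, and op performs the operation proper.  Unless op
     * ended the TB with an exception (DISAS_NORETURN), wout writes the
     * result back and cout computes the condition code.
     */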
6378     if (insn->help_in1) {
6379         insn->help_in1(s, &o);
6380     }
6381     if (insn->help_in2) {
6382         insn->help_in2(s, &o);
6383     }
6384     if (insn->help_prep) {
6385         insn->help_prep(s, &o);
6386     }
6387     if (insn->help_op) {
6388         ret = insn->help_op(s, &o);
6389     }
6390     if (ret != DISAS_NORETURN) {
6391         if (insn->help_wout) {
6392             insn->help_wout(s, &o);
6393         }
6394         if (insn->help_cout) {
6395             insn->help_cout(s, &o);
6396         }
6397     }
6398 
6399     /* An I/O insn must be the last one in the TB when icount is enabled. */
6400     if (unlikely(icount && ret == DISAS_NEXT)) {
6401         ret = DISAS_TOO_MANY;
6402     }
6403 
6404 #ifndef CONFIG_USER_ONLY
6405     if (s->base.tb->flags & FLAG_MASK_PER) {
6406         /* An exception might be triggered; save the PSW if not already done.  */
6407         if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
6408             tcg_gen_movi_i64(psw_addr, s->pc_tmp);
6409         }
6410 
6411         /* Call the helper to check for a possible PER exception.  */
6412         gen_helper_per_check_exception(cpu_env);
6413     }
6414 #endif
6415 
6416 out:
6417     /* Advance to the next instruction.  */
6418     s->base.pc_next = s->pc_tmp;
6419     return ret;
6420 }
6421 
6422 static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
6423 {
6424     DisasContext *dc = container_of(dcbase, DisasContext, base);
6425 
6426     /* 31-bit mode */
6427     if (!(dc->base.tb->flags & FLAG_MASK_64)) {
6428         dc->base.pc_first &= 0x7fffffff;
6429         dc->base.pc_next = dc->base.pc_first;
6430     }
6431 
6432     dc->cc_op = CC_OP_DYNAMIC;
6433     dc->ex_value = dc->base.tb->cs_base;
6434     dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
6435 }
6436 
6437 static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
6438 {
6439 }
6440 
6441 static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
6442 {
6443     DisasContext *dc = container_of(dcbase, DisasContext, base);
6444 
6445     /* Delay setting ilen until we've read the insn. */
6446     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
6447     dc->insn_start = tcg_last_op();
6448 }
6449 
6450 static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
6451                                 uint64_t pc)
6452 {
6453     uint64_t insn = cpu_lduw_code(env, pc);
6454 
6455     return pc + get_ilen((insn >> 8) & 0xff);
6456 }
6457 
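/* Translate one insn, ending the TB early if this insn or the next
   would cross a page boundary, or if we are translating the target
   of an EXECUTE. */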
6458 static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
6459 {
6460     CPUS390XState *env = cs->env_ptr;
6461     DisasContext *dc = container_of(dcbase, DisasContext, base);
6462 
6463     dc->base.is_jmp = translate_one(env, dc);
6464     if (dc->base.is_jmp == DISAS_NEXT) {
6465         if (dc->ex_value ||
6466             !is_same_page(dcbase, dc->base.pc_next) ||
6467             !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
6468             dc->base.is_jmp = DISAS_TOO_MANY;
6469         }
6470     }
6471 }
6472 
6473 static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
6474 {
6475     DisasContext *dc = container_of(dcbase, DisasContext, base);
6476 
6477     switch (dc->base.is_jmp) {
6478     case DISAS_NORETURN:
6479         break;
6480     case DISAS_TOO_MANY:
6481         update_psw_addr(dc);
6482         /* FALLTHRU */
6483     case DISAS_PC_UPDATED:
6484         /* The next TB starts off with CC_OP_DYNAMIC, so make sure the
6485            current cc op is written back to env.  */
6486         update_cc_op(dc);
6487         /* FALLTHRU */
6488     case DISAS_PC_CC_UPDATED:
6489         /* Exit the TB, either by raising a debug exception or by return.  */
6490         if (dc->exit_to_mainloop) {
6491             tcg_gen_exit_tb(NULL, 0);
6492         } else {
6493             tcg_gen_lookup_and_goto_ptr();
6494         }
6495         break;
6496     default:
6497         g_assert_not_reached();
6498     }
6499 }
6500 
6501 static void s390x_tr_disas_log(const DisasContextBase *dcbase,
6502                                CPUState *cs, FILE *logfile)
6503 {
6504     DisasContext *dc = container_of(dcbase, DisasContext, base);
6505 
6506     if (unlikely(dc->ex_value)) {
6507         /* ??? Unfortunately target_disas can't use host memory.  */
6508         fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
6509     } else {
6510         fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
6511         target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
6512     }
6513 }
6514 
6515 static const TranslatorOps s390x_tr_ops = {
6516     .init_disas_context = s390x_tr_init_disas_context,
6517     .tb_start           = s390x_tr_tb_start,
6518     .insn_start         = s390x_tr_insn_start,
6519     .translate_insn     = s390x_tr_translate_insn,
6520     .tb_stop            = s390x_tr_tb_stop,
6521     .disas_log          = s390x_tr_disas_log,
6522 };
6523 
6524 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
6525                            target_ulong pc, void *host_pc)
6526 {
6527     DisasContext dc;
6528 
6529     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
6530 }
6531 
6532 void s390x_restore_state_to_opc(CPUState *cs,
6533                                 const TranslationBlock *tb,
6534                                 const uint64_t *data)
6535 {
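    /* data[] holds the values recorded by tcg_gen_insn_start() during
       translation: the pc, the cc op, and (set once known) the ilen. */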
6536     S390CPU *cpu = S390_CPU(cs);
6537     CPUS390XState *env = &cpu->env;
6538     int cc_op = data[1];
6539 
6540     env->psw.addr = data[0];
6541 
6542     /* Update the CC opcode if it is not already up-to-date.  */
6543     if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
6544         env->cc_op = cc_op;
6545     }
6546 
6547     /* Record ILEN.  */
6548     env->int_pgm_ilen = data[2];
6549 }
6550