xref: /openbmc/qemu/target/s390x/tcg/translate.c (revision 1141159c)
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"


/* Information that (most) every instruction needs to manipulate.  */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

/*
 * Define a structure to hold the decoded fields.  We'll store each inside
 * an array indexed by an enum.  In order to conserve memory, we'll arrange
 * for fields that do not exist at the same time to overlap, thus the "C"
 * for compact.  For checking purposes there is an "O" for original index
 * as well that will be applied to availability bitmaps.
 */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};

enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
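
/*
 * Illustration (annotation, not from the original source): in an RX-style
 * instruction "r1, d2(x2, b2)", the fields r1, b2, d2 and x2 occupy compact
 * slots 0, 1, 5 and 6 respectively and never collide; a field such as m1,
 * which shares slot 0 with r1, only occurs in formats that have no r1.
 */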

struct DisasFields {
    uint64_t raw_insn;
    unsigned op:8;
    unsigned op2:8;
    unsigned presentC:16;
    unsigned int presentO;
    int c[NUM_C_FIELD];
};

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;
    TCGOp *insn_start;
    DisasFields fields;
    uint64_t ex_value;
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;
    enum cc_op cc_op;
    bool exit_to_mainloop;
};

/* Information carried about a condition to be evaluated.  */
typedef struct {
    TCGCond cond:8;
    bool is_64;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif

static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc)
{
    if (s->base.tb->flags & FLAG_MASK_32) {
        if (s->base.tb->flags & FLAG_MASK_64) {
            tcg_gen_movi_i64(out, pc);
            return;
        }
        pc |= 0x80000000;
    }
    assert(!(s->base.tb->flags & FLAG_MASK_64));
    tcg_gen_deposit_i64(out, out, tcg_constant_i64(pc), 0, 32);
}
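
/*
 * Worked example (annotation): in 31-bit mode a link address of 0x123456
 * is stored as 0x80123456 in the low word of OUT with the high word
 * preserved; in 64-bit mode the full address replaces OUT; in 24-bit mode
 * only the low 32 bits are replaced and the addressing-mode bit stays 0.
 */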

static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
static TCGv_i64 gbea;

static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];
static TCGv_i64 regs[16];

void s390x_translate_init(void)
{
    int i;

    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.addr),
                                      "psw_addr");
    psw_mask = tcg_global_mem_new_i64(cpu_env,
                                      offsetof(CPUS390XState, psw.mask),
                                      "psw_mask");
    gbea = tcg_global_mem_new_i64(cpu_env,
                                  offsetof(CPUS390XState, gbea),
                                  "gbea");

    cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
                                   "cc_op");
    cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
                                    "cc_src");
    cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
                                    "cc_dst");
    cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
                                   "cc_vr");

    for (i = 0; i < 16; i++) {
        snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
        regs[i] = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUS390XState, regs[i]),
                                     cpu_reg_names[i]);
    }
}

static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] holds the low 8 bytes and vregs[n][1] the high 8 bytes
     * of the 16-byte vector, on both little- and big-endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16-byte elements, the two 8-byte halves will not form a host
     * int128 if the host is little endian, since they are in the wrong
     * order.  Some operations (e.g. xor) do not care.  For operations like
     * addition, the two 8-byte elements have to be loaded separately.
     * Let's force all 16-byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#if !HOST_BIG_ENDIAN
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
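
/*
 * Worked example (annotation, assuming a little-endian host): for
 * es = MO_32 and enr = 1, bytes = 4 and offs = 4; XOR-ing with (8 - 4)
 * yields host offset 0, because big-endian word 1 occupies the
 * least-significant half of the first host uint64_t.
 */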

static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}

static TCGv_i128 load_freg_128(int reg)
{
    TCGv_i64 h = load_freg(reg);
    TCGv_i64 l = load_freg(reg + 2);
    TCGv_i128 r = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(r, l, h);
    return r;
}

static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}

static void per_branch(DisasContext *s, bool to_next)
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i64(gbea, s->base.pc_next);

    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
        gen_helper_per_branch(cpu_env, gbea, next_pc);
    }
#endif
}

static void per_branch_cond(DisasContext *s, TCGCond cond,
                            TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGLabel *lab = gen_new_label();
        tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);

        tcg_gen_movi_i64(gbea, s->base.pc_next);
        gen_helper_per_branch(cpu_env, gbea, psw_addr);

        gen_set_label(lab);
    } else {
        TCGv_i64 pc = tcg_constant_i64(s->base.pc_next);
        tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
    }
#endif
}

static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

static inline uint64_t ld_code2(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)translator_lduw(env, &s->base, pc);
}

static inline uint64_t ld_code4(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    return (uint64_t)(uint32_t)translator_ldl(env, &s->base, pc);
}

static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (!(s->base.tb->flags & FLAG_MASK_DAT)) {
        return MMU_REAL_IDX;
    }

    switch (s->base.tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        return MMU_HOME_IDX;
    default:
        g_assert_not_reached();
        break;
    }
#endif
}

static void gen_exception(int excp)
{
    gen_helper_exception(cpu_env, tcg_constant_i32(excp));
}

static void gen_program_exception(DisasContext *s, int code)
{
    /* Remember what pgm exception this was.  */
    tcg_gen_st_i32(tcg_constant_i32(code), cpu_env,
                   offsetof(CPUS390XState, int_pgm_code));

    tcg_gen_st_i32(tcg_constant_i32(s->ilen), cpu_env,
                   offsetof(CPUS390XState, int_pgm_ilen));

    /* update the psw */
    update_psw_addr(s);

    /* Save off cc.  */
    update_cc_op(s);

    /* Trigger exception.  */
    gen_exception(EXCP_PGM);
}

static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

static inline void gen_data_exception(uint8_t dxc)
{
    gen_helper_data_exception(cpu_env, tcg_constant_i32(dxc));
}

static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}

static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src,
                                  int64_t imm)
{
    tcg_gen_addi_i64(dst, src, imm);
    if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_andi_i64(dst, dst, 0x7fffffff);
        } else {
            tcg_gen_andi_i64(dst, dst, 0x00ffffff);
        }
    }
}
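
/*
 * Worked example (annotation): in 24-bit mode, a base of 0x00fffffe plus a
 * displacement of 4 gives 0x01000002, which the final AND with 0x00ffffff
 * wraps to 0x00000002, matching address arithmetic under the current
 * PSW addressing mode.
 */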

static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /*
     * Note that d2 is limited to 20 bits, signed.  If we crop negative
     * displacements early we create larger immediate addends.
     */
    if (b2 && x2) {
        tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
        gen_addi_and_wrap_i64(s, tmp, tmp, d2);
    } else if (b2) {
        gen_addi_and_wrap_i64(s, tmp, regs[b2], d2);
    } else if (x2) {
        gen_addi_and_wrap_i64(s, tmp, regs[x2], d2);
    } else if (!(s->base.tb->flags & FLAG_MASK_64)) {
        if (s->base.tb->flags & FLAG_MASK_32) {
            tcg_gen_movi_i64(tmp, d2 & 0x7fffffff);
        } else {
            tcg_gen_movi_i64(tmp, d2 & 0x00ffffff);
        }
    } else {
        tcg_gen_movi_i64(tmp, d2);
    }

    return tmp;
}

static inline bool live_cc_data(DisasContext *s)
{
    return (s->cc_op != CC_OP_DYNAMIC
            && s->cc_op != CC_OP_STATIC
            && s->cc_op > 3);
}

static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}

/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    switch (s->cc_op) {
    default:
        dummy = tcg_constant_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        local_cc_op = tcg_constant_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
        tcg_gen_setcondi_i64(TCG_COND_NE, cc_dst, cc_dst, 0);
        tcg_gen_extrl_i64_i32(cc_op, cc_dst);
        break;
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
    case CC_OP_MULS_32:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ADDU:
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA:
    case CC_OP_SUBU:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
    case CC_OP_MULS_64:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_SUB_64:
    case CC_OP_ADD_32:
    case CC_OP_SUB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        g_assert_not_reached();
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
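
/*
 * Example of the lazy scheme (annotation): an ADD LOGICAL records
 * s->cc_op = CC_OP_ADDU with the result in cc_dst and the carry in cc_src,
 * without computing a condition code.  Only when a consumer such as BRC
 * cannot be expressed as an inline comparison does gen_op_calc_cc()
 * materialize the 0..3 value into cc_op via gen_helper_calc_cc().
 */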

static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};
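
/*
 * Reading the table (annotation): mask bit 8 selects CC0 (equal), bit 4
 * selects CC1 (low), bit 2 selects CC2 (high); bit 1 (CC3) is a don't-care
 * here, hence the duplicated entries.  E.g. a branch mask of 8 after a
 * COMPARE maps to TCG_COND_EQ, and 4 | 2 ("low or high") to TCG_COND_NE.
 */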

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible.  */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};

/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op.  */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        switch (mask) {
        case 8 | 2: /* result == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* result != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* !carry (borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_EQ : TCG_COND_NE;
            break;
        case 2 | 1: /* carry (!borrow) */
            cond = old_cc_op == CC_OP_ADDU ? TCG_COND_NE : TCG_COND_EQ;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value.  */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning.  */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison.  */
    c->is_64 = true;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_constant_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_constant_i64(0);
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_constant_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU:
    case CC_OP_SUBU:
        c->is_64 = true;
        c->u.s64.b = tcg_constant_i64(0);
        switch (mask) {
        case 8 | 2:
        case 4 | 1: /* result */
            c->u.s64.a = cc_dst;
            break;
        case 8 | 4:
        case 2 | 1: /* carry */
            c->u.s64.a = cc_src;
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case CC_OP_STATIC:
        c->is_64 = false;
        c->u.s32.a = cc_op;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_constant_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_constant_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_constant_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask.  */
            cond = TCG_COND_NE;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_constant_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}

/* ====================================================================== */
/* Define the insn format enumeration.  */
#define F0(N)                         FMT_##N,
#define F1(N, X1)                     F0(N)
#define F2(N, X1, X2)                 F0(N)
#define F3(N, X1, X2, X3)             F0(N)
#define F4(N, X1, X2, X3, X4)         F0(N)
#define F5(N, X1, X2, X3, X4, X5)     F0(N)
#define F6(N, X1, X2, X3, X4, X5, X6) F0(N)

typedef enum {
#include "insn-format.h.inc"
} DisasFormat;

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6

/* This is the way fields are to be accessed out of DisasFields.  */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)

static bool have_field1(const DisasContext *s, enum DisasFieldIndexO c)
{
    return (s->fields.presentO >> c) & 1;
}

static int get_field1(const DisasContext *s, enum DisasFieldIndexO o,
                      enum DisasFieldIndexC c)
{
    assert(have_field1(s, o));
    return s->fields.c[c];
}
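
/*
 * Usage example (annotation): get_field(s, r1) expands to
 * get_field1(s, FLD_O_r1, FLD_C_r1), which asserts via the presentO
 * bitmap that r1 exists in the decoded format before reading the
 * compact slot FLD_C_r1 (== 0) of s->fields.c[].
 */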

/* Describe the layout of each field in each format.  */
typedef struct DisasField {
    unsigned int beg:8;
    unsigned int size:8;
    unsigned int type:2;
    unsigned int indexC:6;
    enum DisasFieldIndexO indexO:8;
} DisasField;

typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;

#define R(N, B)       {  B,  4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       {  B,  4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       {  B,  4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16,  4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12,  4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    {  B,  S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    {  B,  S, 0, FLD_C_l##N, FLD_O_l##N }

#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6)       { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.h.inc"
};
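
/*
 * Decoding example (annotation, assuming the RX layout in
 * insn-format.h.inc): a format described as F2(..., R(1, 8), BXD(2))
 * yields r1 at bit 8 (4 bits), x2 at bit 12, b2 at bit 16 and d2 at
 * bit 20 (12 bits), each entry carrying its compact and original
 * field indices.
 */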

#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L

/* Generally, we'll extract operands into these structures, operate upon
   them, and store them back.  See the "in1", "in2", "prep", "wout" sets
   of routines below for more details.  */
typedef struct {
    TCGv_i64 out, out2, in1, in2;
    TCGv_i64 addr1;
    TCGv_i128 out_128, in1_128, in2_128;
} DisasOps;

/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16

/* Return values from translate_one, indicating the state of the TB.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have updated the PC and CC values.  */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
#define IF_IO       0x0080      /* input/output instruction */

struct DisasInsn {
    unsigned opc:16;
    unsigned flags:16;
    DisasFormat fmt:8;
    unsigned fac:8;
    unsigned spec:8;

    const char *name;

    /* Pre-process arguments before HELP_OP.  */
    void (*help_in1)(DisasContext *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself.  */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    uint64_t data;
};

/* ====================================================================== */
/* Miscellaneous helpers, used by several operations.  */

static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + (int64_t)imm * 2;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_NORETURN;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_constant_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_constant_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_constant_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
        }

        ret = DISAS_PC_UPDATED;
    }

 egress:
    return ret;
}

/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */

static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_addu64(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
    return DISAS_NEXT;
}

/* Compute carry into cc_src. */
static void compute_carry(DisasContext *s)
{
    switch (s->cc_op) {
    case CC_OP_ADDU:
        /* The carry value is already in cc_src (1,0). */
        break;
    case CC_OP_SUBU:
        tcg_gen_addi_i64(cc_src, cc_src, 1);
        break;
    default:
        gen_op_calc_cc(s);
        /* fall through */
    case CC_OP_STATIC:
        /* The carry flag is the msb of CC; compute into cc_src. */
        tcg_gen_extu_i32_i64(cc_src, cc_op);
        tcg_gen_shri_i64(cc_src, cc_src, 1);
        break;
    }
}
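
/*
 * Example (annotation): with s->cc_op == CC_OP_STATIC after an ADD LOGICAL,
 * CC is 0/1 without carry and 2/3 with carry, so shifting CC right by one
 * bit leaves exactly the carry in cc_src.  For CC_OP_SUBU, cc_src holds -1
 * when a borrow occurred and 0 otherwise, so adding 1 maps it onto the
 * same (0, 1) carry convention that CC_OP_ADDU uses.
 */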

static DisasJumpType op_addc32(DisasContext *s, DisasOps *o)
{
    compute_carry(s);
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    tcg_gen_add_i64(o->out, o->out, cc_src);
    return DISAS_NEXT;
}

static DisasJumpType op_addc64(DisasContext *s, DisasOps *o)
{
    compute_carry(s);

    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, zero);
    tcg_gen_add2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);

    return DISAS_NEXT;
}

static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
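
/*
 * Note (annotation): ASI adds a signed immediate to storage.  Without the
 * interlocked-access facility (STFLE bit 45) the read-modify-write is
 * emitted as a separate load, add and store; with it, a single
 * tcg_gen_atomic_fetch_add_i64() updates memory atomically and the
 * addition is redone on the fetched value only to derive the CC.
 */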

static DisasJumpType op_asiu64(DisasContext *s, DisasOps *o)
{
    bool non_atomic = !s390_has_feat(S390_FEAT_STFLE_45);

    o->in1 = tcg_temp_new_i64();
    if (non_atomic) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_movi_i64(cc_src, 0);
    tcg_gen_add2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);

    if (non_atomic) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out_128, cpu_env, o->in1_128, o->in2_128);
    return DISAS_NEXT;
}

static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, o->in2, shift);
    tcg_gen_ori_i64(t, t, ~mask);
    tcg_gen_and_i64(o->out, o->in1, t);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
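
/*
 * Worked example (annotation): for NIHH, s->insn->data encodes size 16 and
 * shift 48, so mask = 0xffff000000000000.  Shifting the immediate into
 * place and OR-ing in ~mask yields a value that is all-ones outside the
 * selected halfword; the AND therefore affects only that halfword, and the
 * CC is derived from the masked bits alone.
 */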

static DisasJumpType op_andc(DisasContext *s, DisasOps *o)
{
    tcg_gen_andc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_orc(DisasContext *s, DisasOps *o)
{
    tcg_gen_orc_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nand(DisasContext *s, DisasOps *o)
{
    tcg_gen_nand_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nor(DisasContext *s, DisasOps *o)
{
    tcg_gen_nor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_nxor(DisasContext *s, DisasOps *o)
{
    tcg_gen_eqv_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_bas(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    if (o->in2) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    } else {
        return DISAS_NEXT;
    }
}

static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
}
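
/*
 * Layout note (annotation): the 24-bit-mode BAL/BALR link word holds the
 * ILC in PSW bits 32-33, the CC in 34-35, the program mask in 36-39 and
 * the 24-bit return address in 40-63.  Hence (ilen / 2) << 30 for the ILC,
 * the CC shifted to bit position 28 of the low word, and the program mask
 * extracted from psw_mask with a shift of 16 into bits 24-27.
 */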
1524 
1525 static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
1526 {
1527     save_link_info(s, o);
1528     if (o->in2) {
1529         tcg_gen_mov_i64(psw_addr, o->in2);
1530         per_branch(s, false);
1531         return DISAS_PC_UPDATED;
1532     } else {
1533         return DISAS_NEXT;
1534     }
1535 }
1536 
1537 /*
1538  * Disassemble the target of a branch. The results are returned in a form
1539  * suitable for passing into help_branch():
1540  *
1541  * - bool IS_IMM reflects whether the target is fixed or computed. Non-EXECUTEd
1542  *   branches, whose DisasContext *S contains the relative immediate field RI,
1543  *   are considered fixed. All the other branches are considered computed.
1544  * - int IMM is the value of RI.
1545  * - TCGv_i64 CDEST is the address of the computed target.
1546  */
1547 #define disas_jdest(s, ri, is_imm, imm, cdest) do {                            \
1548     if (have_field(s, ri)) {                                                   \
1549         if (unlikely(s->ex_value)) {                                           \
1550             cdest = tcg_temp_new_i64();                                        \
1551             tcg_gen_ld_i64(cdest, cpu_env, offsetof(CPUS390XState, ex_target));\
1552             tcg_gen_addi_i64(cdest, cdest, (int64_t)get_field(s, ri) * 2);     \
1553             is_imm = false;                                                    \
1554         } else {                                                               \
1555             is_imm = true;                                                     \
1556         }                                                                      \
1557     } else {                                                                   \
1558         is_imm = false;                                                        \
1559     }                                                                          \
1560     imm = is_imm ? get_field(s, ri) : 0;                                       \
1561 } while (false)
1562 
1563 static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
1564 {
1565     DisasCompare c;
1566     bool is_imm;
1567     int imm;
1568 
1569     pc_to_link_info(o->out, s, s->pc_tmp);
1570 
1571     disas_jdest(s, i2, is_imm, imm, o->in2);
1572     disas_jcc(s, &c, 0xf);
1573     return help_branch(s, &c, is_imm, imm, o->in2);
1574 }
1575 
1576 static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
1577 {
1578     int m1 = get_field(s, m1);
1579     DisasCompare c;
1580     bool is_imm;
1581     int imm;
1582 
1583     /* BCR with R2 = 0 causes no branching */
1584     if (have_field(s, r2) && get_field(s, r2) == 0) {
1585         if (m1 == 14) {
1586             /* Perform serialization */
1587             /* FIXME: check for fast-BCR-serialization facility */
1588             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1589         }
1590         if (m1 == 15) {
1591             /* Perform serialization */
1592             /* FIXME: perform checkpoint-synchronisation */
1593             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1594         }
1595         return DISAS_NEXT;
1596     }
1597 
1598     disas_jdest(s, i2, is_imm, imm, o->in2);
1599     disas_jcc(s, &c, m1);
1600     return help_branch(s, &c, is_imm, imm, o->in2);
1601 }
1602 
1603 static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
1604 {
1605     int r1 = get_field(s, r1);
1606     DisasCompare c;
1607     bool is_imm;
1608     TCGv_i64 t;
1609     int imm;
1610 
1611     c.cond = TCG_COND_NE;
1612     c.is_64 = false;
1613 
1614     t = tcg_temp_new_i64();
1615     tcg_gen_subi_i64(t, regs[r1], 1);
1616     store_reg32_i64(r1, t);
1617     c.u.s32.a = tcg_temp_new_i32();
1618     c.u.s32.b = tcg_constant_i32(0);
1619     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1620 
1621     disas_jdest(s, i2, is_imm, imm, o->in2);
1622     return help_branch(s, &c, is_imm, imm, o->in2);
1623 }
1624 
1625 static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
1626 {
1627     int r1 = get_field(s, r1);
1628     int imm = get_field(s, i2);
1629     DisasCompare c;
1630     TCGv_i64 t;
1631 
1632     c.cond = TCG_COND_NE;
1633     c.is_64 = false;
1634 
1635     t = tcg_temp_new_i64();
1636     tcg_gen_shri_i64(t, regs[r1], 32);
1637     tcg_gen_subi_i64(t, t, 1);
1638     store_reg32h_i64(r1, t);
1639     c.u.s32.a = tcg_temp_new_i32();
1640     c.u.s32.b = tcg_constant_i32(0);
1641     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1642 
1643     return help_branch(s, &c, 1, imm, o->in2);
1644 }
1645 
1646 static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
1647 {
1648     int r1 = get_field(s, r1);
1649     DisasCompare c;
1650     bool is_imm;
1651     int imm;
1652 
1653     c.cond = TCG_COND_NE;
1654     c.is_64 = true;
1655 
1656     tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1657     c.u.s64.a = regs[r1];
1658     c.u.s64.b = tcg_constant_i64(0);
1659 
1660     disas_jdest(s, i2, is_imm, imm, o->in2);
1661     return help_branch(s, &c, is_imm, imm, o->in2);
1662 }
1663 
1664 static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
1665 {
1666     int r1 = get_field(s, r1);
1667     int r3 = get_field(s, r3);
1668     DisasCompare c;
1669     bool is_imm;
1670     TCGv_i64 t;
1671     int imm;
1672 
1673     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1674     c.is_64 = false;
1675 
1676     t = tcg_temp_new_i64();
1677     tcg_gen_add_i64(t, regs[r1], regs[r3]);
1678     c.u.s32.a = tcg_temp_new_i32();
1679     c.u.s32.b = tcg_temp_new_i32();
1680     tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1681     tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1682     store_reg32_i64(r1, t);
1683 
1684     disas_jdest(s, i2, is_imm, imm, o->in2);
1685     return help_branch(s, &c, is_imm, imm, o->in2);
1686 }
1687 
1688 static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
1689 {
1690     int r1 = get_field(s, r1);
1691     int r3 = get_field(s, r3);
1692     DisasCompare c;
1693     bool is_imm;
1694     int imm;
1695 
1696     c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1697     c.is_64 = true;
1698 
1699     if (r1 == (r3 | 1)) {
1700         c.u.s64.b = load_reg(r3 | 1);
1701     } else {
1702         c.u.s64.b = regs[r3 | 1];
1703     }
1704 
1705     tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1706     c.u.s64.a = regs[r1];
1707 
1708     disas_jdest(s, i2, is_imm, imm, o->in2);
1709     return help_branch(s, &c, is_imm, imm, o->in2);
1710 }
1711 
1712 static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
1713 {
1714     int imm, m3 = get_field(s, m3);
1715     bool is_imm;
1716     DisasCompare c;
1717 
1718     c.cond = ltgt_cond[m3];
1719     if (s->insn->data) {
1720         c.cond = tcg_unsigned_cond(c.cond);
1721     }
1722     c.is_64 = true;
1723     c.u.s64.a = o->in1;
1724     c.u.s64.b = o->in2;
1725 
1726     o->out = NULL;
1727     disas_jdest(s, i4, is_imm, imm, o->out);
1728     if (!is_imm && !o->out) {
1729         imm = 0;
1730         o->out = get_address(s, 0, get_field(s, b4),
1731                              get_field(s, d4));
1732     }
1733 
1734     return help_branch(s, &c, is_imm, imm, o->out);
1735 }
1736 
1737 static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
1738 {
1739     gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1740     set_cc_static(s);
1741     return DISAS_NEXT;
1742 }
1743 
1744 static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
1745 {
1746     gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1747     set_cc_static(s);
1748     return DISAS_NEXT;
1749 }
1750 
1751 static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
1752 {
1753     gen_helper_cxb(cc_op, cpu_env, o->in1_128, o->in2_128);
1754     set_cc_static(s);
1755     return DISAS_NEXT;
1756 }
1757 
1758 static TCGv_i32 fpinst_extract_m34(DisasContext *s, bool m3_with_fpe,
1759                                    bool m4_with_fpe)
1760 {
1761     const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT);
1762     uint8_t m3 = get_field(s, m3);
1763     uint8_t m4 = get_field(s, m4);
1764 
1765     /* m3 field was introduced with FPE */
1766     if (!fpe && m3_with_fpe) {
1767         m3 = 0;
1768     }
1769     /* m4 field was introduced with FPE */
1770     if (!fpe && m4_with_fpe) {
1771         m4 = 0;
1772     }
1773 
1774     /* Check for valid rounding modes; mode 2 is reserved, mode 3 needs FPE. */
1775     if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) {
1776         gen_program_exception(s, PGM_SPECIFICATION);
1777         return NULL;
1778     }
1779 
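         /* Pack the two fields into one constant: m3 in bits 0-3, m4 in
            bits 4-7, e.g. m3 = 5, m4 = 1 yields 0x15.  The helpers split
            the two nibbles apart again.  */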
1780     return tcg_constant_i32(deposit32(m3, 4, 4, m4));
1781 }
1782 
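     /*
      * CONVERT TO FIXED / TO LOGICAL, all widths: m34 carries the rounding
      * mode down to the helper.  These set the condition code (presumably
      * recorded by the helper itself), hence the set_cc_static() calls.
      */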
1783 static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
1784 {
1785     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1786 
1787     if (!m34) {
1788         return DISAS_NORETURN;
1789     }
1790     gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
1791     set_cc_static(s);
1792     return DISAS_NEXT;
1793 }
1794 
1795 static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
1796 {
1797     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1798 
1799     if (!m34) {
1800         return DISAS_NORETURN;
1801     }
1802     gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
1803     set_cc_static(s);
1804     return DISAS_NEXT;
1805 }
1806 
1807 static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
1808 {
1809     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1810 
1811     if (!m34) {
1812         return DISAS_NORETURN;
1813     }
1814     gen_helper_cfxb(o->out, cpu_env, o->in2_128, m34);
1815     set_cc_static(s);
1816     return DISAS_NEXT;
1817 }
1818 
1819 static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
1820 {
1821     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1822 
1823     if (!m34) {
1824         return DISAS_NORETURN;
1825     }
1826     gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
1827     set_cc_static(s);
1828     return DISAS_NEXT;
1829 }
1830 
1831 static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
1832 {
1833     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1834 
1835     if (!m34) {
1836         return DISAS_NORETURN;
1837     }
1838     gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
1839     set_cc_static(s);
1840     return DISAS_NEXT;
1841 }
1842 
1843 static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
1844 {
1845     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
1846 
1847     if (!m34) {
1848         return DISAS_NORETURN;
1849     }
1850     gen_helper_cgxb(o->out, cpu_env, o->in2_128, m34);
1851     set_cc_static(s);
1852     return DISAS_NEXT;
1853 }
1854 
1855 static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
1856 {
1857     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1858 
1859     if (!m34) {
1860         return DISAS_NORETURN;
1861     }
1862     gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
1863     set_cc_static(s);
1864     return DISAS_NEXT;
1865 }
1866 
1867 static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
1868 {
1869     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1870 
1871     if (!m34) {
1872         return DISAS_NORETURN;
1873     }
1874     gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
1875     set_cc_static(s);
1876     return DISAS_NEXT;
1877 }
1878 
1879 static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
1880 {
1881     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1882 
1883     if (!m34) {
1884         return DISAS_NORETURN;
1885     }
1886     gen_helper_clfxb(o->out, cpu_env, o->in2_128, m34);
1887     set_cc_static(s);
1888     return DISAS_NEXT;
1889 }
1890 
1891 static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
1892 {
1893     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1894 
1895     if (!m34) {
1896         return DISAS_NORETURN;
1897     }
1898     gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
1899     set_cc_static(s);
1900     return DISAS_NEXT;
1901 }
1902 
1903 static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
1904 {
1905     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1906 
1907     if (!m34) {
1908         return DISAS_NORETURN;
1909     }
1910     gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
1911     set_cc_static(s);
1912     return DISAS_NEXT;
1913 }
1914 
1915 static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
1916 {
1917     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1918 
1919     if (!m34) {
1920         return DISAS_NORETURN;
1921     }
1922     gen_helper_clgxb(o->out, cpu_env, o->in2_128, m34);
1923     set_cc_static(s);
1924     return DISAS_NEXT;
1925 }
1926 
1927 static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
1928 {
1929     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1930 
1931     if (!m34) {
1932         return DISAS_NORETURN;
1933     }
1934     gen_helper_cegb(o->out, cpu_env, o->in2, m34);
1935     return DISAS_NEXT;
1936 }
1937 
1938 static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
1939 {
1940     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1941 
1942     if (!m34) {
1943         return DISAS_NORETURN;
1944     }
1945     gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
1946     return DISAS_NEXT;
1947 }
1948 
1949 static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
1950 {
1951     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
1952 
1953     if (!m34) {
1954         return DISAS_NORETURN;
1955     }
1956     gen_helper_cxgb(o->out_128, cpu_env, o->in2, m34);
1957     return DISAS_NEXT;
1958 }
1959 
1960 static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
1961 {
1962     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1963 
1964     if (!m34) {
1965         return DISAS_NORETURN;
1966     }
1967     gen_helper_celgb(o->out, cpu_env, o->in2, m34);
1968     return DISAS_NEXT;
1969 }
1970 
1971 static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
1972 {
1973     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1974 
1975     if (!m34) {
1976         return DISAS_NORETURN;
1977     }
1978     gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
1979     return DISAS_NEXT;
1980 }
1981 
1982 static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
1983 {
1984     TCGv_i32 m34 = fpinst_extract_m34(s, false, false);
1985 
1986     if (!m34) {
1987         return DISAS_NORETURN;
1988     }
1989     gen_helper_cxlgb(o->out_128, cpu_env, o->in2, m34);
1990     return DISAS_NEXT;
1991 }
1992 
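     /*
      * CHECKSUM: the helper hands back the checksum and the number of
      * bytes consumed packed into one i128; the address in r2 is then
      * advanced and the remaining length in r2+1 reduced by that count.
      */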
1993 static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
1994 {
1995     int r2 = get_field(s, r2);
1996     TCGv_i128 pair = tcg_temp_new_i128();
1997     TCGv_i64 len = tcg_temp_new_i64();
1998 
1999     gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
2000     set_cc_static(s);
2001     tcg_gen_extr_i128_i64(o->out, len, pair);
2002 
2003     tcg_gen_add_i64(regs[r2], regs[r2], len);
2004     tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
2005 
2006     return DISAS_NEXT;
2007 }
2008 
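     /*
      * COMPARE LOGICAL (character): operand lengths of 1, 2, 4 or 8 bytes
      * are inlined as two loads plus an unsigned comparison for the CC;
      * every other length takes the generic helper.
      */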
2009 static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
2010 {
2011     int l = get_field(s, l1);
2012     TCGv_i32 vl;
2013     MemOp mop;
2014 
2015     switch (l + 1) {
2016     case 1:
2017     case 2:
2018     case 4:
2019     case 8:
2020         mop = ctz32(l + 1) | MO_TE;
2021         tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
2022         tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
2023         gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
2024         return DISAS_NEXT;
2025     default:
2026         vl = tcg_constant_i32(l);
2027         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
2028         set_cc_static(s);
2029         return DISAS_NEXT;
2030     }
2031 }
2032 
2033 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
2034 {
2035     int r1 = get_field(s, r1);
2036     int r2 = get_field(s, r2);
2037     TCGv_i32 t1, t2;
2038 
2039     /* r1 and r2 must be even.  */
2040     if (r1 & 1 || r2 & 1) {
2041         gen_program_exception(s, PGM_SPECIFICATION);
2042         return DISAS_NORETURN;
2043     }
2044 
2045     t1 = tcg_constant_i32(r1);
2046     t2 = tcg_constant_i32(r2);
2047     gen_helper_clcl(cc_op, cpu_env, t1, t2);
2048     set_cc_static(s);
2049     return DISAS_NEXT;
2050 }
2051 
2052 static DisasJumpType op_clcle(DisasContext *s, DisasOps *o)
2053 {
2054     int r1 = get_field(s, r1);
2055     int r3 = get_field(s, r3);
2056     TCGv_i32 t1, t3;
2057 
2058     /* r1 and r3 must be even.  */
2059     if (r1 & 1 || r3 & 1) {
2060         gen_program_exception(s, PGM_SPECIFICATION);
2061         return DISAS_NORETURN;
2062     }
2063 
2064     t1 = tcg_constant_i32(r1);
2065     t3 = tcg_constant_i32(r3);
2066     gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
2067     set_cc_static(s);
2068     return DISAS_NEXT;
2069 }
2070 
2071 static DisasJumpType op_clclu(DisasContext *s, DisasOps *o)
2072 {
2073     int r1 = get_field(s, r1);
2074     int r3 = get_field(s, r3);
2075     TCGv_i32 t1, t3;
2076 
2077     /* r1 and r3 must be even.  */
2078     if (r1 & 1 || r3 & 1) {
2079         gen_program_exception(s, PGM_SPECIFICATION);
2080         return DISAS_NORETURN;
2081     }
2082 
2083     t1 = tcg_constant_i32(r1);
2084     t3 = tcg_constant_i32(r3);
2085     gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
2086     set_cc_static(s);
2087     return DISAS_NEXT;
2088 }
2089 
2090 static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
2091 {
2092     TCGv_i32 m3 = tcg_constant_i32(get_field(s, m3));
2093     TCGv_i32 t1 = tcg_temp_new_i32();
2094 
2095     tcg_gen_extrl_i64_i32(t1, o->in1);
2096     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
2097     set_cc_static(s);
2098     return DISAS_NEXT;
2099 }
2100 
2101 static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
2102 {
2103     TCGv_i128 pair = tcg_temp_new_i128();
2104 
2105     gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
2106     tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
2107 
2108     set_cc_static(s);
2109     return DISAS_NEXT;
2110 }
2111 
2112 static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
2113 {
2114     TCGv_i64 t = tcg_temp_new_i64();
2115     tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
2116     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
2117     tcg_gen_or_i64(o->out, o->out, t);
2118     return DISAS_NEXT;
2119 }
2120 
2121 static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
2122 {
2123     int d2 = get_field(s, d2);
2124     int b2 = get_field(s, b2);
2125     TCGv_i64 addr, cc;
2126 
2127     /* Note that in1 = R3 (new value) and
2128        in2 = (zero-extended) R1 (expected value).  */
2129 
2130     addr = get_address(s, 0, b2, d2);
2131     tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
2132                                get_mem_index(s), s->insn->data | MO_ALIGN);
2133 
2134     /* Are the memory and expected values (un)equal?  Note that this setcond
2135        produces the output CC value, thus the NE sense of the test.  */
2136     cc = tcg_temp_new_i64();
2137     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
2138     tcg_gen_extrl_i64_i32(cc_op, cc);
2139     set_cc_static(s);
2140 
2141     return DISAS_NEXT;
2142 }
2143 
2144 static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
2145 {
2146     int r1 = get_field(s, r1);
2147 
2148     o->out_128 = tcg_temp_new_i128();
2149     tcg_gen_concat_i64_i128(o->out_128, regs[r1 + 1], regs[r1]);
2150 
2151     /* Note out (R1:R1+1) = expected value and in2 (R3:R3+1) = new value.  */
2152     tcg_gen_atomic_cmpxchg_i128(o->out_128, o->addr1, o->out_128, o->in2_128,
2153                                 get_mem_index(s), MO_BE | MO_128 | MO_ALIGN);
2154 
2155     /*
2156      * Extract result into cc_dst:cc_src, compare vs the expected value
2157      * in the as yet unmodified input registers, then update CC_OP.
2158      */
2159     tcg_gen_extr_i128_i64(cc_src, cc_dst, o->out_128);
2160     tcg_gen_xor_i64(cc_dst, cc_dst, regs[r1]);
2161     tcg_gen_xor_i64(cc_src, cc_src, regs[r1 + 1]);
2162     tcg_gen_or_i64(cc_dst, cc_dst, cc_src);
2163     set_cc_nz_u64(s, cc_dst);
2164 
2165     return DISAS_NEXT;
2166 }
2167 
2168 static DisasJumpType op_csst(DisasContext *s, DisasOps *o)
2169 {
2170     int r3 = get_field(s, r3);
2171     TCGv_i32 t_r3 = tcg_constant_i32(r3);
2172 
2173     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2174         gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2175     } else {
2176         gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2);
2177     }
2178 
2179     set_cc_static(s);
2180     return DISAS_NEXT;
2181 }
2182 
2183 #ifndef CONFIG_USER_ONLY
2184 static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
2185 {
2186     MemOp mop = s->insn->data;
2187     TCGv_i64 addr, old, cc;
2188     TCGLabel *lab = gen_new_label();
2189 
2190     /* Note that in1 = R1 (zero-extended expected value),
2191        out = R1 (original reg), out2 = R1+1 (new value).  */
2192 
2193     addr = tcg_temp_new_i64();
2194     old = tcg_temp_new_i64();
2195     tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2196     tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2197                                get_mem_index(s), mop | MO_ALIGN);
2198 
2199     /* Are the memory and expected values (un)equal?  */
2200     cc = tcg_temp_new_i64();
2201     tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2202     tcg_gen_extrl_i64_i32(cc_op, cc);
2203 
2204     /* Write back the output now, so that it happens before the
2205        following branch, so that we don't need local temps.  */
2206     if ((mop & MO_SIZE) == MO_32) {
2207         tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2208     } else {
2209         tcg_gen_mov_i64(o->out, old);
2210     }
2211 
2212     /* If the comparison was equal, and the LSB of R2 was set,
2213        then we need to flush the TLB (for all cpus).  */
2214     tcg_gen_xori_i64(cc, cc, 1);
2215     tcg_gen_and_i64(cc, cc, o->in2);
2216     tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2217 
2218     gen_helper_purge(cpu_env);
2219     gen_set_label(lab);
2220 
2221     return DISAS_NEXT;
2222 }
2223 #endif
2224 
2225 static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
2226 {
2227     TCGv_i64 t1 = tcg_temp_new_i64();
2228     TCGv_i32 t2 = tcg_temp_new_i32();
2229     tcg_gen_extrl_i64_i32(t2, o->in1);
2230     gen_helper_cvd(t1, t2);
2231     tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
2232     return DISAS_NEXT;
2233 }
2234 
2235 static DisasJumpType op_ct(DisasContext *s, DisasOps *o)
2236 {
2237     int m3 = get_field(s, m3);
2238     TCGLabel *lab = gen_new_label();
2239     TCGCond c;
2240 
2241     c = tcg_invert_cond(ltgt_cond[m3]);
2242     if (s->insn->data) {
2243         c = tcg_unsigned_cond(c);
2244     }
2245     tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2246 
2247     /* Trap.  */
2248     gen_trap(s);
2249 
2250     gen_set_label(lab);
2251     return DISAS_NEXT;
2252 }
2253 
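     /*
      * CONVERT UTF: insn->data names the conversion as two digits, where
      * 1 = UTF-8, 2 = UTF-16 and 4 = UTF-32 (so 12 is CU12, UTF-8 to
      * UTF-16).  m3 requests well-formedness checking, which only exists
      * with the ETF3 enhancement.
      */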
2254 static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
2255 {
2256     int m3 = get_field(s, m3);
2257     int r1 = get_field(s, r1);
2258     int r2 = get_field(s, r2);
2259     TCGv_i32 tr1, tr2, chk;
2260 
2261     /* R1 and R2 must both be even.  */
2262     if ((r1 | r2) & 1) {
2263         gen_program_exception(s, PGM_SPECIFICATION);
2264         return DISAS_NORETURN;
2265     }
2266     if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2267         m3 = 0;
2268     }
2269 
2270     tr1 = tcg_constant_i32(r1);
2271     tr2 = tcg_constant_i32(r2);
2272     chk = tcg_constant_i32(m3);
2273 
2274     switch (s->insn->data) {
2275     case 12:
2276         gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2277         break;
2278     case 14:
2279         gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2280         break;
2281     case 21:
2282         gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2283         break;
2284     case 24:
2285         gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2286         break;
2287     case 41:
2288         gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2289         break;
2290     case 42:
2291         gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2292         break;
2293     default:
2294         g_assert_not_reached();
2295     }
2296 
2297     set_cc_static(s);
2298     return DISAS_NEXT;
2299 }
2300 
2301 #ifndef CONFIG_USER_ONLY
2302 static DisasJumpType op_diag(DisasContext *s, DisasOps *o)
2303 {
2304     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2305     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2306     TCGv_i32 func_code = tcg_constant_i32(get_field(s, i2));
2307 
2308     gen_helper_diag(cpu_env, r1, r3, func_code);
2309     return DISAS_NEXT;
2310 }
2311 #endif
2312 
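     /*
      * For DIVIDE, the architecture places the remainder in the even
      * register of the pair and the quotient in the odd one; the helpers
      * return both packed together, and they are split again here.
      */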
2313 static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
2314 {
2315     gen_helper_divs32(o->out, cpu_env, o->in1, o->in2);
2316     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2317     return DISAS_NEXT;
2318 }
2319 
2320 static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
2321 {
2322     gen_helper_divu32(o->out, cpu_env, o->in1, o->in2);
2323     tcg_gen_extr32_i64(o->out2, o->out, o->out);
2324     return DISAS_NEXT;
2325 }
2326 
2327 static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
2328 {
2329     TCGv_i128 t = tcg_temp_new_i128();
2330 
2331     gen_helper_divs64(t, cpu_env, o->in1, o->in2);
2332     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2333     return DISAS_NEXT;
2334 }
2335 
2336 static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
2337 {
2338     TCGv_i128 t = tcg_temp_new_i128();
2339 
2340     gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
2341     tcg_gen_extr_i128_i64(o->out2, o->out, t);
2342     return DISAS_NEXT;
2343 }
2344 
2345 static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
2346 {
2347     gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2348     return DISAS_NEXT;
2349 }
2350 
2351 static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
2352 {
2353     gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2354     return DISAS_NEXT;
2355 }
2356 
2357 static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
2358 {
2359     gen_helper_dxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
2360     return DISAS_NEXT;
2361 }
2362 
2363 static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
2364 {
2365     int r2 = get_field(s, r2);
2366     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2367     return DISAS_NEXT;
2368 }
2369 
2370 static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
2371 {
2372     /* No cache information provided.  */
2373     tcg_gen_movi_i64(o->out, -1);
2374     return DISAS_NEXT;
2375 }
2376 
2377 static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
2378 {
2379     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2380     return DISAS_NEXT;
2381 }
2382 
2383 static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
2384 {
2385     int r1 = get_field(s, r1);
2386     int r2 = get_field(s, r2);
2387     TCGv_i64 t = tcg_temp_new_i64();
2388 
2389     /* Note the "subsequently" in the PoO, which implies a defined result
2390        if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
2391     tcg_gen_shri_i64(t, psw_mask, 32);
2392     store_reg32_i64(r1, t);
2393     if (r2 != 0) {
2394         store_reg32_i64(r2, psw_mask);
2395     }
2396     return DISAS_NEXT;
2397 }
2398 
2399 static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
2400 {
2401     int r1 = get_field(s, r1);
2402     TCGv_i32 ilen;
2403     TCGv_i64 v1;
2404 
2405     /* Nested EXECUTE is not allowed.  */
2406     if (unlikely(s->ex_value)) {
2407         gen_program_exception(s, PGM_EXECUTE);
2408         return DISAS_NORETURN;
2409     }
2410 
2411     update_psw_addr(s);
2412     update_cc_op(s);
2413 
2414     if (r1 == 0) {
2415         v1 = tcg_constant_i64(0);
2416     } else {
2417         v1 = regs[r1];
2418     }
2419 
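         /* The helper fetches the target instruction, ORs bits 56-63 of
            r1 into its second byte (unless r1 is 0), and arranges for it
            to be executed next; PSW address and cc_op were synced above
            for that purpose.  */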
2420     ilen = tcg_constant_i32(s->ilen);
2421     gen_helper_ex(cpu_env, ilen, v1, o->in2);
2422 
2423     return DISAS_PC_CC_UPDATED;
2424 }
2425 
2426 static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
2427 {
2428     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2429 
2430     if (!m34) {
2431         return DISAS_NORETURN;
2432     }
2433     gen_helper_fieb(o->out, cpu_env, o->in2, m34);
2434     return DISAS_NEXT;
2435 }
2436 
2437 static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
2438 {
2439     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2440 
2441     if (!m34) {
2442         return DISAS_NORETURN;
2443     }
2444     gen_helper_fidb(o->out, cpu_env, o->in2, m34);
2445     return DISAS_NEXT;
2446 }
2447 
2448 static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
2449 {
2450     TCGv_i32 m34 = fpinst_extract_m34(s, false, true);
2451 
2452     if (!m34) {
2453         return DISAS_NORETURN;
2454     }
2455     gen_helper_fixb(o->out_128, cpu_env, o->in2_128, m34);
2456     return DISAS_NEXT;
2457 }
2458 
2459 static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
2460 {
2461     /* We'll use the original input for cc computation, since we get to
2462        compare that against 0, which ought to be better than comparing
2463        the real output against 64.  It also lets cc_dst be a convenient
2464        temporary during our computation.  */
2465     gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2466 
2467     /* R1 = IN ? CLZ(IN) : 64.  */
2468     tcg_gen_clzi_i64(o->out, o->in2, 64);
2469 
2470     /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
2471        value by 64, which is undefined.  But since the shift is 64 iff the
2472        input is zero, we still get the correct result after and'ing.  */
2473     tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2474     tcg_gen_shr_i64(o->out2, o->out2, o->out);
2475     tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
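         /* E.g. IN = 0x0010000000000000 gives R1 = 11 and R1+1 = 0;
            IN = 0 gives R1 = 64 and R1+1 = 0.  */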
2476     return DISAS_NEXT;
2477 }
2478 
2479 static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
2480 {
2481     int m3 = get_field(s, m3);
2482     int pos, len, base = s->insn->data;
2483     TCGv_i64 tmp = tcg_temp_new_i64();
2484     uint64_t ccm;
2485 
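         /* m3 selects the bytes to replace, leftmost bit first; base (from
            insn->data) picks the low or high word of r1.  ccm accumulates
            the replaced bit positions so the CC is computed from the
            inserted bits only.  */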
2486     switch (m3) {
2487     case 0xf:
2488         /* Effectively a 32-bit load.  */
2489         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
2490         len = 32;
2491         goto one_insert;
2492 
2493     case 0xc:
2494     case 0x6:
2495     case 0x3:
2496         /* Effectively a 16-bit load.  */
2497         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
2498         len = 16;
2499         goto one_insert;
2500 
2501     case 0x8:
2502     case 0x4:
2503     case 0x2:
2504     case 0x1:
2505         /* Effectively an 8-bit load.  */
2506         tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2507         len = 8;
2508         goto one_insert;
2509 
2510     one_insert:
2511         pos = base + ctz32(m3) * 8;
2512         tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2513         ccm = ((1ull << len) - 1) << pos;
2514         break;
2515 
2516     default:
2517         /* This is going to be a sequence of loads and inserts.  */
2518         pos = base + 32 - 8;
2519         ccm = 0;
2520         while (m3) {
2521             if (m3 & 0x8) {
2522                 tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
2523                 tcg_gen_addi_i64(o->in2, o->in2, 1);
2524                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2525                 ccm |= 0xffull << pos;
2526             }
2527             m3 = (m3 << 1) & 0xf;
2528             pos -= 8;
2529         }
2530         break;
2531     }
2532 
2533     tcg_gen_movi_i64(tmp, ccm);
2534     gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2535     return DISAS_NEXT;
2536 }
2537 
2538 static DisasJumpType op_insi(DisasContext *s, DisasOps *o)
2539 {
2540     int shift = s->insn->data & 0xff;
2541     int size = s->insn->data >> 8;
2542     tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2543     return DISAS_NEXT;
2544 }
2545 
2546 static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
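     /*
      * INSERT PROGRAM MASK: assemble a byte from two zero bits, the 2-bit
      * condition code and the 4-bit program mask, and deposit it into bits
      * 32-39 of r1; the rest of the register is untouched.
      */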
2547 {
2548     TCGv_i64 t1, t2;
2549 
2550     gen_op_calc_cc(s);
2551     t1 = tcg_temp_new_i64();
2552     tcg_gen_extract_i64(t1, psw_mask, 40, 4);
2553     t2 = tcg_temp_new_i64();
2554     tcg_gen_extu_i32_i64(t2, cc_op);
2555     tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
2556     tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
2557     return DISAS_NEXT;
2558 }
2559 
2560 #ifndef CONFIG_USER_ONLY
2561 static DisasJumpType op_idte(DisasContext *s, DisasOps *o)
2562 {
2563     TCGv_i32 m4;
2564 
2565     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2566         m4 = tcg_constant_i32(get_field(s, m4));
2567     } else {
2568         m4 = tcg_constant_i32(0);
2569     }
2570     gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2571     return DISAS_NEXT;
2572 }
2573 
2574 static DisasJumpType op_ipte(DisasContext *s, DisasOps *o)
2575 {
2576     TCGv_i32 m4;
2577 
2578     if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2579         m4 = tcg_constant_i32(get_field(s, m4));
2580     } else {
2581         m4 = tcg_constant_i32(0);
2582     }
2583     gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2584     return DISAS_NEXT;
2585 }
2586 
2587 static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
2588 {
2589     gen_helper_iske(o->out, cpu_env, o->in2);
2590     return DISAS_NEXT;
2591 }
2592 #endif
2593 
2594 static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
2595 {
2596     int r1 = have_field(s, r1) ? get_field(s, r1) : 0;
2597     int r2 = have_field(s, r2) ? get_field(s, r2) : 0;
2598     int r3 = have_field(s, r3) ? get_field(s, r3) : 0;
2599     TCGv_i32 t_r1, t_r2, t_r3, type;
2600 
2601     switch (s->insn->data) {
2602     case S390_FEAT_TYPE_KMA:
2603         if (r3 == r1 || r3 == r2) {
2604             gen_program_exception(s, PGM_SPECIFICATION);
2605             return DISAS_NORETURN;
2606         }
2607         /* FALL THROUGH */
2608     case S390_FEAT_TYPE_KMCTR:
2609         if (r3 & 1 || !r3) {
2610             gen_program_exception(s, PGM_SPECIFICATION);
2611             return DISAS_NORETURN;
2612         }
2613         /* FALL THROUGH */
2614     case S390_FEAT_TYPE_PPNO:
2615     case S390_FEAT_TYPE_KMF:
2616     case S390_FEAT_TYPE_KMC:
2617     case S390_FEAT_TYPE_KMO:
2618     case S390_FEAT_TYPE_KM:
2619         if (r1 & 1 || !r1) {
2620             gen_program_exception(s, PGM_SPECIFICATION);
2621             return DISAS_NORETURN;
2622         }
2623         /* FALL THROUGH */
2624     case S390_FEAT_TYPE_KMAC:
2625     case S390_FEAT_TYPE_KIMD:
2626     case S390_FEAT_TYPE_KLMD:
2627         if (r2 & 1 || !r2) {
2628             gen_program_exception(s, PGM_SPECIFICATION);
2629             return DISAS_NORETURN;
2630         }
2631         /* FALL THROUGH */
2632     case S390_FEAT_TYPE_PCKMO:
2633     case S390_FEAT_TYPE_PCC:
2634         break;
2635     default:
2636         g_assert_not_reached();
2637     }
2638 
2639     t_r1 = tcg_constant_i32(r1);
2640     t_r2 = tcg_constant_i32(r2);
2641     t_r3 = tcg_constant_i32(r3);
2642     type = tcg_constant_i32(s->insn->data);
2643     gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2644     set_cc_static(s);
2645     return DISAS_NEXT;
2646 }
2647 
2648 static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
2649 {
2650     gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2651     set_cc_static(s);
2652     return DISAS_NEXT;
2653 }
2654 
2655 static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
2656 {
2657     gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2658     set_cc_static(s);
2659     return DISAS_NEXT;
2660 }
2661 
2662 static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
2663 {
2664     gen_helper_kxb(cc_op, cpu_env, o->in1_128, o->in2_128);
2665     set_cc_static(s);
2666     return DISAS_NEXT;
2667 }
2668 
2669 static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
2670 {
2671     /* The real output is the original value in memory, which the
2672        atomic fetch-add leaves in o->in2.  */
2673     tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2674                                  s->insn->data | MO_ALIGN);
2675     /* However, we need to recompute the addition for setting CC.  */
2676     tcg_gen_add_i64(o->out, o->in1, o->in2);
2677     return DISAS_NEXT;
2678 }
2679 
2680 static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
2681 {
2682     /* The real output is the original value in memory, which the
2683        atomic fetch-and leaves in o->in2.  */
2684     tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2685                                  s->insn->data | MO_ALIGN);
2686     /* However, we need to recompute the operation for setting CC.  */
2687     tcg_gen_and_i64(o->out, o->in1, o->in2);
2688     return DISAS_NEXT;
2689 }
2690 
2691 static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
2692 {
2693     /* The real output is the original value in memory, which the
2694        atomic fetch-or leaves in o->in2.  */
2695     tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2696                                 s->insn->data | MO_ALIGN);
2697     /* However, we need to recompute the operation for setting CC.  */
2698     tcg_gen_or_i64(o->out, o->in1, o->in2);
2699     return DISAS_NEXT;
2700 }
2701 
2702 static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
2703 {
2704     /* The real output is the original value in memory, which the
2705        atomic fetch-xor leaves in o->in2.  */
2706     tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2707                                  s->insn->data | MO_ALIGN);
2708     /* However, we need to recompute the operation for setting CC.  */
2709     tcg_gen_xor_i64(o->out, o->in1, o->in2);
2710     return DISAS_NEXT;
2711 }
2712 
2713 static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
2714 {
2715     gen_helper_ldeb(o->out, cpu_env, o->in2);
2716     return DISAS_NEXT;
2717 }
2718 
2719 static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
2720 {
2721     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2722 
2723     if (!m34) {
2724         return DISAS_NORETURN;
2725     }
2726     gen_helper_ledb(o->out, cpu_env, o->in2, m34);
2727     return DISAS_NEXT;
2728 }
2729 
2730 static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
2731 {
2732     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2733 
2734     if (!m34) {
2735         return DISAS_NORETURN;
2736     }
2737     gen_helper_ldxb(o->out, cpu_env, o->in2_128, m34);
2738     return DISAS_NEXT;
2739 }
2740 
2741 static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
2742 {
2743     TCGv_i32 m34 = fpinst_extract_m34(s, true, true);
2744 
2745     if (!m34) {
2746         return DISAS_NORETURN;
2747     }
2748     gen_helper_lexb(o->out, cpu_env, o->in2_128, m34);
2749     return DISAS_NEXT;
2750 }
2751 
2752 static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
2753 {
2754     gen_helper_lxdb(o->out_128, cpu_env, o->in2);
2755     return DISAS_NEXT;
2756 }
2757 
2758 static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
2759 {
2760     gen_helper_lxeb(o->out_128, cpu_env, o->in2);
2761     return DISAS_NEXT;
2762 }
2763 
2764 static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
2765 {
2766     tcg_gen_shli_i64(o->out, o->in2, 32);
2767     return DISAS_NEXT;
2768 }
2769 
2770 static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
2771 {
2772     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2773     return DISAS_NEXT;
2774 }
2775 
2776 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
2777 {
2778     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
2779     return DISAS_NEXT;
2780 }
2781 
2782 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
2783 {
2784     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
2785     return DISAS_NEXT;
2786 }
2787 
2788 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
2789 {
2790     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
2791     return DISAS_NEXT;
2792 }
2793 
2794 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
2795 {
2796     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
2797     return DISAS_NEXT;
2798 }
2799 
2800 static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
2801 {
2802     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2803                        MO_TESL | s->insn->data);
2804     return DISAS_NEXT;
2805 }
2806 
2807 static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
2808 {
2809     tcg_gen_qemu_ld_tl(o->out, o->in2, get_mem_index(s),
2810                        MO_TEUL | s->insn->data);
2811     return DISAS_NEXT;
2812 }
2813 
2814 static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
2815 {
2816     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s),
2817                         MO_TEUQ | s->insn->data);
2818     return DISAS_NEXT;
2819 }
2820 
2821 static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
2822 {
2823     TCGLabel *lab = gen_new_label();
2824     store_reg32_i64(get_field(s, r1), o->in2);
2825     /* The value is stored even in case of trap. */
2826     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2827     gen_trap(s);
2828     gen_set_label(lab);
2829     return DISAS_NEXT;
2830 }
2831 
2832 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
2833 {
2834     TCGLabel *lab = gen_new_label();
2835     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
2836     /* The value is stored even in case of trap. */
2837     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2838     gen_trap(s);
2839     gen_set_label(lab);
2840     return DISAS_NEXT;
2841 }
2842 
2843 static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
2844 {
2845     TCGLabel *lab = gen_new_label();
2846     store_reg32h_i64(get_field(s, r1), o->in2);
2847     /* The value is stored even in case of trap. */
2848     tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2849     gen_trap(s);
2850     gen_set_label(lab);
2851     return DISAS_NEXT;
2852 }
2853 
2854 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
2855 {
2856     TCGLabel *lab = gen_new_label();
2857 
2858     tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
2859     /* The value is stored even in case of trap. */
2860     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2861     gen_trap(s);
2862     gen_set_label(lab);
2863     return DISAS_NEXT;
2864 }
2865 
2866 static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
2867 {
2868     TCGLabel *lab = gen_new_label();
2869     tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2870     /* The value is stored even in case of trap. */
2871     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2872     gen_trap(s);
2873     gen_set_label(lab);
2874     return DISAS_NEXT;
2875 }
2876 
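     /*
      * LOAD/SELECT ON CONDITION: the condition from disas_jcc becomes a
      * movcond rather than a branch; 32-bit compares are first widened so
      * that a single 64-bit movcond serves both cases.
      */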
2877 static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
2878 {
2879     DisasCompare c;
2880 
2881     if (have_field(s, m3)) {
2882         /* LOAD * ON CONDITION */
2883         disas_jcc(s, &c, get_field(s, m3));
2884     } else {
2885         /* SELECT */
2886         disas_jcc(s, &c, get_field(s, m4));
2887     }
2888 
2889     if (c.is_64) {
2890         tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2891                             o->in2, o->in1);
2892     } else {
2893         TCGv_i32 t32 = tcg_temp_new_i32();
2894         TCGv_i64 t, z;
2895 
2896         tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2897 
2898         t = tcg_temp_new_i64();
2899         tcg_gen_extu_i32_i64(t, t32);
2900 
2901         z = tcg_constant_i64(0);
2902         tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2903     }
2904 
2905     return DISAS_NEXT;
2906 }
2907 
2908 #ifndef CONFIG_USER_ONLY
2909 static DisasJumpType op_lctl(DisasContext *s, DisasOps *o)
2910 {
2911     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2912     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2913 
2914     gen_helper_lctl(cpu_env, r1, o->in2, r3);
2915     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2916     s->exit_to_mainloop = true;
2917     return DISAS_TOO_MANY;
2918 }
2919 
2920 static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
2921 {
2922     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2923     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2924 
2925     gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2926     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
2927     s->exit_to_mainloop = true;
2928     return DISAS_TOO_MANY;
2929 }
2930 
2931 static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
2932 {
2933     gen_helper_lra(o->out, cpu_env, o->in2);
2934     set_cc_static(s);
2935     return DISAS_NEXT;
2936 }
2937 
2938 static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
2939 {
2940     tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2941     return DISAS_NEXT;
2942 }
2943 
2944 static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
2945 {
2946     TCGv_i64 mask, addr;
2947 
2948     per_breaking_event(s);
2949 
2950     /*
2951      * Convert the short PSW into the normal PSW, similar to what
2952      * s390_cpu_load_normal() does.
2953      */
2954     mask = tcg_temp_new_i64();
2955     addr = tcg_temp_new_i64();
2956     tcg_gen_qemu_ld_i64(mask, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN_8);
2957     tcg_gen_andi_i64(addr, mask, PSW_MASK_SHORT_ADDR);
2958     tcg_gen_andi_i64(mask, mask, PSW_MASK_SHORT_CTRL);
2959     tcg_gen_xori_i64(mask, mask, PSW_MASK_SHORTPSW);
2960     gen_helper_load_psw(cpu_env, mask, addr);
2961     return DISAS_NORETURN;
2962 }
2963 
2964 static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
2965 {
2966     TCGv_i64 t1, t2;
2967 
2968     per_breaking_event(s);
2969 
2970     t1 = tcg_temp_new_i64();
2971     t2 = tcg_temp_new_i64();
2972     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
2973                         MO_TEUQ | MO_ALIGN_8);
2974     tcg_gen_addi_i64(o->in2, o->in2, 8);
2975     tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
2976     gen_helper_load_psw(cpu_env, t1, t2);
2977     return DISAS_NORETURN;
2978 }
2979 #endif
2980 
2981 static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
2982 {
2983     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
2984     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
2985 
2986     gen_helper_lam(cpu_env, r1, o->in2, r3);
2987     return DISAS_NEXT;
2988 }
2989 
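     /*
      * LOAD MULTIPLE: loading the first and last words up front raises any
      * page fault before a register has been modified, which keeps the
      * instruction restartable.
      */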
2990 static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
2991 {
2992     int r1 = get_field(s, r1);
2993     int r3 = get_field(s, r3);
2994     TCGv_i64 t1, t2;
2995 
2996     /* Only one register to read. */
2997     t1 = tcg_temp_new_i64();
2998     if (unlikely(r1 == r3)) {
2999         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3000         store_reg32_i64(r1, t1);
3001         return DISAS_NEXT;
3002     }
3003 
3004     /* First load the values of the first and last registers to trigger
3005        possible page faults. */
3006     t2 = tcg_temp_new_i64();
3007     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3008     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3009     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3010     store_reg32_i64(r1, t1);
3011     store_reg32_i64(r3, t2);
3012 
3013     /* Only two registers to read. */
3014     if (((r1 + 1) & 15) == r3) {
3015         return DISAS_NEXT;
3016     }
3017 
3018     /* Then load the remaining registers. Page fault can't occur. */
3019     r3 = (r3 - 1) & 15;
3020     tcg_gen_movi_i64(t2, 4);
3021     while (r1 != r3) {
3022         r1 = (r1 + 1) & 15;
3023         tcg_gen_add_i64(o->in2, o->in2, t2);
3024         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3025         store_reg32_i64(r1, t1);
3026     }
3027     return DISAS_NEXT;
3028 }
3029 
3030 static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
3031 {
3032     int r1 = get_field(s, r1);
3033     int r3 = get_field(s, r3);
3034     TCGv_i64 t1, t2;
3035 
3036     /* Only one register to read. */
3037     t1 = tcg_temp_new_i64();
3038     if (unlikely(r1 == r3)) {
3039         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3040         store_reg32h_i64(r1, t1);
3041         return DISAS_NEXT;
3042     }
3043 
3044     /* First load the values of the first and last registers to trigger
3045        possible page faults. */
3046     t2 = tcg_temp_new_i64();
3047     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3048     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
3049     tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
3050     store_reg32h_i64(r1, t1);
3051     store_reg32h_i64(r3, t2);
3052 
3053     /* Only two registers to read. */
3054     if (((r1 + 1) & 15) == r3) {
3055         return DISAS_NEXT;
3056     }
3057 
3058     /* Then load the remaining registers. Page fault can't occur. */
3059     r3 = (r3 - 1) & 15;
3060     tcg_gen_movi_i64(t2, 4);
3061     while (r1 != r3) {
3062         r1 = (r1 + 1) & 15;
3063         tcg_gen_add_i64(o->in2, o->in2, t2);
3064         tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
3065         store_reg32h_i64(r1, t1);
3066     }
3067     return DISAS_NEXT;
3068 }
3069 
3070 static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
3071 {
3072     int r1 = get_field(s, r1);
3073     int r3 = get_field(s, r3);
3074     TCGv_i64 t1, t2;
3075 
3076     /* Only one register to read. */
3077     if (unlikely(r1 == r3)) {
3078         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3079         return DISAS_NEXT;
3080     }
3081 
3082     /* First load the values of the first and last registers to trigger
3083        possible page faults. */
3084     t1 = tcg_temp_new_i64();
3085     t2 = tcg_temp_new_i64();
3086     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
3087     tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
3088     tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
3089     tcg_gen_mov_i64(regs[r1], t1);
3090 
3091     /* Only two registers to read. */
3092     if (((r1 + 1) & 15) == r3) {
3093         return DISAS_NEXT;
3094     }
3095 
3096     /* Then load the remaining registers. Page fault can't occur. */
3097     r3 = (r3 - 1) & 15;
3098     tcg_gen_movi_i64(t1, 8);
3099     while (r1 != r3) {
3100         r1 = (r1 + 1) & 15;
3101         tcg_gen_add_i64(o->in2, o->in2, t1);
3102         tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
3103     }
3104     return DISAS_NEXT;
3105 }
3106 
3107 static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
3108 {
3109     TCGv_i64 a1, a2;
3110     MemOp mop = s->insn->data;
3111 
3112     /* In a parallel context, stop the world and single step.  */
3113     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3114         update_psw_addr(s);
3115         update_cc_op(s);
3116         gen_exception(EXCP_ATOMIC);
3117         return DISAS_NORETURN;
3118     }
3119 
3120     /* In a serial context, perform the two loads ... */
3121     a1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
3122     a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3123     tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
3124     tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
3125 
3126     /* ... and indicate that we performed them while interlocked.  */
3127     gen_op_movi_cc(s, 0);
3128     return DISAS_NEXT;
3129 }
3130 
3131 static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
3132 {
3133     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
3134         gen_helper_lpq(o->out, cpu_env, o->in2);
3135     } else if (HAVE_ATOMIC128) {
3136         gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
3137     } else {
3138         gen_helper_exit_atomic(cpu_env);
3139         return DISAS_NORETURN;
3140     }
3141     return_low128(o->out2);
3142     return DISAS_NEXT;
3143 }
3144 
3145 #ifndef CONFIG_USER_ONLY
3146 static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
3147 {
3148     tcg_gen_qemu_ld_tl(o->out, o->in2, MMU_REAL_IDX, s->insn->data);
3149     return DISAS_NEXT;
3150 }
3151 #endif
3152 
3153 static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
3154 {
3155     tcg_gen_andi_i64(o->out, o->in2, -256);
3156     return DISAS_NEXT;
3157 }
3158 
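     /*
      * LOAD COUNT TO BLOCK BOUNDARY: -(addr | -block_size) is the distance
      * to the next block boundary, i.e. block_size - (addr % block_size),
      * which is then capped at 16.  E.g. a 64-byte block (m3 = 0) and an
      * address ending in 0x3c give 4.
      */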
3159 static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
3160 {
3161     const int64_t block_size = (1ull << (get_field(s, m3) + 6));
3162 
3163     if (get_field(s, m3) > 6) {
3164         gen_program_exception(s, PGM_SPECIFICATION);
3165         return DISAS_NORETURN;
3166     }
3167 
3168     tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
3169     tcg_gen_neg_i64(o->addr1, o->addr1);
3170     tcg_gen_movi_i64(o->out, 16);
3171     tcg_gen_umin_i64(o->out, o->out, o->addr1);
3172     gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
3173     return DISAS_NEXT;
3174 }
3175 
3176 static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
3177 {
3178     const uint16_t monitor_class = get_field(s, i2);
3179 
3180     if (monitor_class & 0xff00) {
3181         gen_program_exception(s, PGM_SPECIFICATION);
3182         return DISAS_NORETURN;
3183     }
3184 
3185 #if !defined(CONFIG_USER_ONLY)
3186     gen_helper_monitor_call(cpu_env, o->addr1,
3187                             tcg_constant_i32(monitor_class));
3188 #endif
3189     /* Defaults to a NOP. */
3190     return DISAS_NEXT;
3191 }
3192 
3193 static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
3194 {
3195     o->out = o->in2;
3196     o->in2 = NULL;
3197     return DISAS_NEXT;
3198 }
3199 
3200 static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
3201 {
3202     int b2 = get_field(s, b2);
3203     TCGv ar1 = tcg_temp_new_i64();
3204 
3205     o->out = o->in2;
3206     o->in2 = NULL;
3207 
3208     switch (s->base.tb->flags & FLAG_MASK_ASC) {
3209     case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3210         tcg_gen_movi_i64(ar1, 0);
3211         break;
3212     case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3213         tcg_gen_movi_i64(ar1, 1);
3214         break;
3215     case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3216         if (b2) {
3217             tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3218         } else {
3219             tcg_gen_movi_i64(ar1, 0);
3220         }
3221         break;
3222     case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3223         tcg_gen_movi_i64(ar1, 2);
3224         break;
3225     }
3226 
3227     tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3228     return DISAS_NEXT;
3229 }
3230 
3231 static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
3232 {
3233     o->out = o->in1;
3234     o->out2 = o->in2;
3235     o->in1 = NULL;
3236     o->in2 = NULL;
3237     return DISAS_NEXT;
3238 }
3239 
3240 static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
3241 {
3242     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3243 
3244     gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3245     return DISAS_NEXT;
3246 }
3247 
3248 static DisasJumpType op_mvcrl(DisasContext *s, DisasOps *o)
3249 {
3250     gen_helper_mvcrl(cpu_env, regs[0], o->addr1, o->in2);
3251     return DISAS_NEXT;
3252 }
3253 
3254 static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
3255 {
3256     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3257 
3258     gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3259     return DISAS_NEXT;
3260 }
3261 
3262 static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o)
3263 {
3264     int r1 = get_field(s, r1);
3265     int r2 = get_field(s, r2);
3266     TCGv_i32 t1, t2;
3267 
3268     /* r1 and r2 must be even.  */
3269     if (r1 & 1 || r2 & 1) {
3270         gen_program_exception(s, PGM_SPECIFICATION);
3271         return DISAS_NORETURN;
3272     }
3273 
3274     t1 = tcg_constant_i32(r1);
3275     t2 = tcg_constant_i32(r2);
3276     gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3277     set_cc_static(s);
3278     return DISAS_NEXT;
3279 }
3280 
3281 static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o)
3282 {
3283     int r1 = get_field(s, r1);
3284     int r3 = get_field(s, r3);
3285     TCGv_i32 t1, t3;
3286 
3287     /* r1 and r3 must be even.  */
3288     if (r1 & 1 || r3 & 1) {
3289         gen_program_exception(s, PGM_SPECIFICATION);
3290         return DISAS_NORETURN;
3291     }
3292 
3293     t1 = tcg_constant_i32(r1);
3294     t3 = tcg_constant_i32(r3);
3295     gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3296     set_cc_static(s);
3297     return DISAS_NEXT;
3298 }
3299 
3300 static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o)
3301 {
3302     int r1 = get_field(s, r1);
3303     int r3 = get_field(s, r3);
3304     TCGv_i32 t1, t3;
3305 
3306     /* r1 and r3 must be even.  */
3307     if (r1 & 1 || r3 & 1) {
3308         gen_program_exception(s, PGM_SPECIFICATION);
3309         return DISAS_NORETURN;
3310     }
3311 
3312     t1 = tcg_constant_i32(r1);
3313     t3 = tcg_constant_i32(r3);
3314     gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3315     set_cc_static(s);
3316     return DISAS_NEXT;
3317 }
3318 
3319 static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
3320 {
3321     int r3 = get_field(s, r3);
3322     gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3323     set_cc_static(s);
3324     return DISAS_NEXT;
3325 }
3326 
3327 #ifndef CONFIG_USER_ONLY
3328 static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
3329 {
3330     int r1 = get_field(s, l1);
3331     int r3 = get_field(s, r3);
3332     gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3333     set_cc_static(s);
3334     return DISAS_NEXT;
3335 }
3336 
3337 static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
3338 {
3339     int r1 = get_field(s, l1);
3340     int r3 = get_field(s, r3);
3341     gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2, regs[r3]);
3342     set_cc_static(s);
3343     return DISAS_NEXT;
3344 }
3345 #endif
3346 
3347 static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
3348 {
3349     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3350 
3351     gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3352     return DISAS_NEXT;
3353 }
3354 
3355 static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
3356 {
3357     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3358 
3359     gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3360     return DISAS_NEXT;
3361 }
3362 
3363 static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
3364 {
3365     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3366     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3367 
3368     gen_helper_mvpg(cc_op, cpu_env, regs[0], t1, t2);
3369     set_cc_static(s);
3370     return DISAS_NEXT;
3371 }
3372 
3373 static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
3374 {
3375     TCGv_i32 t1 = tcg_constant_i32(get_field(s, r1));
3376     TCGv_i32 t2 = tcg_constant_i32(get_field(s, r2));
3377 
3378     gen_helper_mvst(cc_op, cpu_env, t1, t2);
3379     set_cc_static(s);
3380     return DISAS_NEXT;
3381 }
3382 
3383 static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
3384 {
3385     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3386 
3387     gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3388     return DISAS_NEXT;
3389 }
3390 
3391 static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
3392 {
3393     tcg_gen_mul_i64(o->out, o->in1, o->in2);
3394     return DISAS_NEXT;
3395 }
3396 
3397 static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
3398 {
3399     tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3400     return DISAS_NEXT;
3401 }
3402 
3403 static DisasJumpType op_muls128(DisasContext *s, DisasOps *o)
3404 {
3405     tcg_gen_muls2_i64(o->out2, o->out, o->in1, o->in2);
3406     return DISAS_NEXT;
3407 }
3408 
3409 static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
3410 {
3411     gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3412     return DISAS_NEXT;
3413 }
3414 
3415 static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
3416 {
3417     gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3418     return DISAS_NEXT;
3419 }
3420 
3421 static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
3422 {
3423     gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3424     return DISAS_NEXT;
3425 }
3426 
3427 static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
3428 {
3429     gen_helper_mxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3430     return DISAS_NEXT;
3431 }
3432 
3433 static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
3434 {
3435     gen_helper_mxdb(o->out_128, cpu_env, o->in1_128, o->in2);
3436     return DISAS_NEXT;
3437 }
3438 
3439 static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
3440 {
3441     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3442     gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3443     return DISAS_NEXT;
3444 }
3445 
3446 static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
3447 {
3448     TCGv_i64 r3 = load_freg(get_field(s, r3));
3449     gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
3450     return DISAS_NEXT;
3451 }
3452 
3453 static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
3454 {
3455     TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
3456     gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3457     return DISAS_NEXT;
3458 }
3459 
3460 static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
3461 {
3462     TCGv_i64 r3 = load_freg(get_field(s, r3));
3463     gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
3464     return DISAS_NEXT;
3465 }
3466 
3467 static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
3468 {
3469     TCGv_i64 z = tcg_constant_i64(0);
3470     TCGv_i64 n = tcg_temp_new_i64();
3471 
3472     tcg_gen_neg_i64(n, o->in2);
3473     tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3474     return DISAS_NEXT;
3475 }
3476 
3477 static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
3478 {
3479     tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3480     return DISAS_NEXT;
3481 }
3482 
3483 static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
3484 {
3485     tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3486     return DISAS_NEXT;
3487 }
3488 
3489 static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
3490 {
3491     tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3492     tcg_gen_mov_i64(o->out2, o->in2);
3493     return DISAS_NEXT;
3494 }
3495 
3496 static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
3497 {
3498     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3499 
3500     gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3501     set_cc_static(s);
3502     return DISAS_NEXT;
3503 }
3504 
3505 static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
3506 {
3507     tcg_gen_neg_i64(o->out, o->in2);
3508     return DISAS_NEXT;
3509 }
3510 
3511 static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
3512 {
3513     tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3514     return DISAS_NEXT;
3515 }
3516 
3517 static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
3518 {
3519     tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3520     return DISAS_NEXT;
3521 }
3522 
3523 static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
3524 {
3525     tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3526     tcg_gen_mov_i64(o->out2, o->in2);
3527     return DISAS_NEXT;
3528 }
3529 
3530 static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
3531 {
3532     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3533 
3534     gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3535     set_cc_static(s);
3536     return DISAS_NEXT;
3537 }
3538 
3539 static DisasJumpType op_or(DisasContext *s, DisasOps *o)
3540 {
3541     tcg_gen_or_i64(o->out, o->in1, o->in2);
3542     return DISAS_NEXT;
3543 }
3544 
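/*
 * Immediate-to-register OR (OILL, OILH, OIHL, OIHH and the 32-bit forms).
 * insn->data packs the field geometry: the low byte is the shift of the
 * immediate within the register, the high byte its size in bits.
 */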
3545 static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
3546 {
3547     int shift = s->insn->data & 0xff;
3548     int size = s->insn->data >> 8;
3549     uint64_t mask = ((1ull << size) - 1) << shift;
3550     TCGv_i64 t = tcg_temp_new_i64();
3551 
3552     tcg_gen_shli_i64(t, o->in2, shift);
3553     tcg_gen_or_i64(o->out, o->in1, t);
3554 
3555     /* Produce the CC from only the bits manipulated.  */
3556     tcg_gen_andi_i64(cc_dst, o->out, mask);
3557     set_cc_nz_u64(s, cc_dst);
3558     return DISAS_NEXT;
3559 }
3560 
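/*
 * OR to memory (OI, OIY).  With the interlocked-access facility 2 the
 * read-modify-write is performed atomically in memory; otherwise it is
 * a plain load, OR, store sequence.
 */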
3561 static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
3562 {
3563     o->in1 = tcg_temp_new_i64();
3564 
3565     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3566         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
3567     } else {
3568         /* Perform the atomic operation in memory. */
3569         tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
3570                                     s->insn->data);
3571     }
3572 
3573     /* Also recompute for the atomic case, since the CC is set from the result. */
3574     tcg_gen_or_i64(o->out, o->in1, o->in2);
3575 
3576     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
3577         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
3578     }
3579     return DISAS_NEXT;
3580 }
3581 
3582 static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
3583 {
3584     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
3585 
3586     gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3587     return DISAS_NEXT;
3588 }
3589 
3590 static DisasJumpType op_pka(DisasContext *s, DisasOps *o)
3591 {
3592     int l2 = get_field(s, l2) + 1;
3593     TCGv_i32 l;
3594 
3595     /* The length must not exceed 32 bytes.  */
3596     if (l2 > 32) {
3597         gen_program_exception(s, PGM_SPECIFICATION);
3598         return DISAS_NORETURN;
3599     }
3600     l = tcg_constant_i32(l2);
3601     gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3602     return DISAS_NEXT;
3603 }
3604 
3605 static DisasJumpType op_pku(DisasContext *s, DisasOps *o)
3606 {
3607     int l2 = get_field(s, l2) + 1;
3608     TCGv_i32 l;
3609 
3610     /* The length must be even and must not exceed 64 bytes.  */
3611     if ((l2 & 1) || (l2 > 64)) {
3612         gen_program_exception(s, PGM_SPECIFICATION);
3613         return DISAS_NORETURN;
3614     }
3615     l = tcg_constant_i32(l2);
3616     gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3617     return DISAS_NEXT;
3618 }
3619 
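/*
 * POPULATION COUNT.  m3 bit 0 (0x8) selects the
 * miscellaneous-instruction-extensions-3 variant that counts across the
 * whole doubleword; the base form counts the bits within each byte
 * separately, which the helper implements.
 */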
3620 static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
3621 {
3622     const uint8_t m3 = get_field(s, m3);
3623 
3624     if ((m3 & 8) && s390_has_feat(S390_FEAT_MISC_INSTRUCTION_EXT3)) {
3625         tcg_gen_ctpop_i64(o->out, o->in2);
3626     } else {
3627         gen_helper_popcnt(o->out, o->in2);
3628     }
3629     return DISAS_NEXT;
3630 }
3631 
3632 #ifndef CONFIG_USER_ONLY
3633 static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
3634 {
3635     gen_helper_ptlb(cpu_env);
3636     return DISAS_NEXT;
3637 }
3638 #endif
3639 
3640 static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
3641 {
3642     int i3 = get_field(s, i3);
3643     int i4 = get_field(s, i4);
3644     int i5 = get_field(s, i5);
3645     int do_zero = i4 & 0x80;
3646     uint64_t mask, imask, pmask;
3647     int pos, len, rot;
3648 
3649     /* Adjust the arguments for the specific insn.  */
3650     switch (s->fields.op2) {
3651     case 0x55: /* risbg */
3652     case 0x59: /* risbgn */
3653         i3 &= 63;
3654         i4 &= 63;
3655         pmask = ~0;
3656         break;
3657     case 0x5d: /* risbhg */
3658         i3 &= 31;
3659         i4 &= 31;
3660         pmask = 0xffffffff00000000ull;
3661         break;
3662     case 0x51: /* risblg */
3663         i3 = (i3 & 31) + 32;
3664         i4 = (i4 & 31) + 32;
3665         pmask = 0x00000000ffffffffull;
3666         break;
3667     default:
3668         g_assert_not_reached();
3669     }
3670 
3671     /* MASK is the set of bits to be inserted from R2. */
3672     if (i3 <= i4) {
3673         /* [0...i3---i4...63] */
3674         mask = (-1ull >> i3) & (-1ull << (63 - i4));
3675     } else {
3676         /* [0---i4...i3---63] */
3677         mask = (-1ull >> i3) | (-1ull << (63 - i4));
3678     }
3679     /* For RISBLG/RISBHG, the wrapping is limited to the high/low doubleword. */
3680     mask &= pmask;
3681 
3682     /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
3683        insns, we need to keep the other half of the register.  */
3684     imask = ~mask | ~pmask;
3685     if (do_zero) {
3686         imask = ~pmask;
3687     }
3688 
3689     len = i4 - i3 + 1;
3690     pos = 63 - i4;
3691     rot = i5 & 63;
3692 
3693     /* In some cases we can implement this with extract.  */
3694     if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3695         tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3696         return DISAS_NEXT;
3697     }
3698 
3699     /* In some cases we can implement this with deposit.  */
3700     if (len > 0 && (imask == 0 || ~mask == imask)) {
3701         /* Note that we rotate the bits to be inserted to the lsb, not to
3702            the position as described in the PoO.  */
3703         rot = (rot - pos) & 63;
3704     } else {
3705         pos = -1;
3706     }
3707 
3708     /* Rotate the input as necessary.  */
3709     tcg_gen_rotli_i64(o->in2, o->in2, rot);
3710 
3711     /* Insert the selected bits into the output.  */
3712     if (pos >= 0) {
3713         if (imask == 0) {
3714             tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3715         } else {
3716             tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3717         }
3718     } else if (imask == 0) {
3719         tcg_gen_andi_i64(o->out, o->in2, mask);
3720     } else {
3721         tcg_gen_andi_i64(o->in2, o->in2, mask);
3722         tcg_gen_andi_i64(o->out, o->out, imask);
3723         tcg_gen_or_i64(o->out, o->out, o->in2);
3724     }
3725     return DISAS_NEXT;
3726 }
3727 
3728 static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
3729 {
3730     int i3 = get_field(s, i3);
3731     int i4 = get_field(s, i4);
3732     int i5 = get_field(s, i5);
3733     TCGv_i64 orig_out;
3734     uint64_t mask;
3735 
3736     /* If this is a test-only form, arrange to discard the result.  */
3737     if (i3 & 0x80) {
3738         tcg_debug_assert(o->out != NULL);
3739         orig_out = o->out;
3740         o->out = tcg_temp_new_i64();
3741         tcg_gen_mov_i64(o->out, orig_out);
3742     }
3743 
3744     i3 &= 63;
3745     i4 &= 63;
3746     i5 &= 63;
3747 
3748     /* MASK is the set of bits to be operated on from R2.
3749        Take care for I3/I4 wraparound.  */
3750     mask = ~0ull >> i3;
3751     if (i3 <= i4) {
3752         mask ^= ~0ull >> i4 >> 1;
3753     } else {
3754         mask |= ~(~0ull >> i4 >> 1);
3755     }
3756 
3757     /* Rotate the input as necessary.  */
3758     tcg_gen_rotli_i64(o->in2, o->in2, i5);
3759 
3760     /* Operate.  */
3761     switch (s->fields.op2) {
3762     case 0x54: /* AND */
3763         tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3764         tcg_gen_and_i64(o->out, o->out, o->in2);
3765         break;
3766     case 0x56: /* OR */
3767         tcg_gen_andi_i64(o->in2, o->in2, mask);
3768         tcg_gen_or_i64(o->out, o->out, o->in2);
3769         break;
3770     case 0x57: /* XOR */
3771         tcg_gen_andi_i64(o->in2, o->in2, mask);
3772         tcg_gen_xor_i64(o->out, o->out, o->in2);
3773         break;
3774     default:
3775         abort();
3776     }
3777 
3778     /* Set the CC.  */
3779     tcg_gen_andi_i64(cc_dst, o->out, mask);
3780     set_cc_nz_u64(s, cc_dst);
3781     return DISAS_NEXT;
3782 }
3783 
3784 static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
3785 {
3786     tcg_gen_bswap16_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3787     return DISAS_NEXT;
3788 }
3789 
3790 static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
3791 {
3792     tcg_gen_bswap32_i64(o->out, o->in2, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
3793     return DISAS_NEXT;
3794 }
3795 
3796 static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
3797 {
3798     tcg_gen_bswap64_i64(o->out, o->in2);
3799     return DISAS_NEXT;
3800 }
3801 
3802 static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
3803 {
3804     TCGv_i32 t1 = tcg_temp_new_i32();
3805     TCGv_i32 t2 = tcg_temp_new_i32();
3806     TCGv_i32 to = tcg_temp_new_i32();
3807     tcg_gen_extrl_i64_i32(t1, o->in1);
3808     tcg_gen_extrl_i64_i32(t2, o->in2);
3809     tcg_gen_rotl_i32(to, t1, t2);
3810     tcg_gen_extu_i32_i64(o->out, to);
3811     return DISAS_NEXT;
3812 }
3813 
3814 static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
3815 {
3816     tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3817     return DISAS_NEXT;
3818 }
3819 
3820 #ifndef CONFIG_USER_ONLY
3821 static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
3822 {
3823     gen_helper_rrbe(cc_op, cpu_env, o->in2);
3824     set_cc_static(s);
3825     return DISAS_NEXT;
3826 }
3827 
3828 static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
3829 {
3830     gen_helper_sacf(cpu_env, o->in2);
3831     /* Addressing mode has changed, so end the block.  */
3832     return DISAS_TOO_MANY;
3833 }
3834 #endif
3835 
3836 static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
3837 {
3838     int sam = s->insn->data;
3839     TCGv_i64 tsam;
3840     uint64_t mask;
3841 
3842     switch (sam) {
3843     case 0:
3844         mask = 0xffffff;
3845         break;
3846     case 1:
3847         mask = 0x7fffffff;
3848         break;
3849     default:
3850         mask = -1;
3851         break;
3852     }
3853 
3854     /* Bizarre but true, we check the address of the current insn for the
3855        specification exception, not the next to be executed.  Thus the PoO
3856        documents that Bad Things Happen two bytes before the end.  */
3857     if (s->base.pc_next & ~mask) {
3858         gen_program_exception(s, PGM_SPECIFICATION);
3859         return DISAS_NORETURN;
3860     }
3861     s->pc_tmp &= mask;
3862 
3863     tsam = tcg_constant_i64(sam);
3864     tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3865 
3866     /* Always exit the TB, since we (may have) changed execution mode.  */
3867     return DISAS_TOO_MANY;
3868 }
3869 
3870 static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
3871 {
3872     int r1 = get_field(s, r1);
3873     tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3874     return DISAS_NEXT;
3875 }
3876 
3877 static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
3878 {
3879     gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3880     return DISAS_NEXT;
3881 }
3882 
3883 static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
3884 {
3885     gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3886     return DISAS_NEXT;
3887 }
3888 
3889 static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
3890 {
3891     gen_helper_sxb(o->out_128, cpu_env, o->in1_128, o->in2_128);
3892     return DISAS_NEXT;
3893 }
3894 
3895 static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
3896 {
3897     gen_helper_sqeb(o->out, cpu_env, o->in2);
3898     return DISAS_NEXT;
3899 }
3900 
3901 static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
3902 {
3903     gen_helper_sqdb(o->out, cpu_env, o->in2);
3904     return DISAS_NEXT;
3905 }
3906 
3907 static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o)
3908 {
3909     gen_helper_sqxb(o->out_128, cpu_env, o->in2_128);
3910     return DISAS_NEXT;
3911 }
3912 
3913 #ifndef CONFIG_USER_ONLY
3914 static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
3915 {
3916     gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3917     set_cc_static(s);
3918     return DISAS_NEXT;
3919 }
3920 
3921 static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
3922 {
3923     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
3924     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
3925 
3926     gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
3927     set_cc_static(s);
3928     return DISAS_NEXT;
3929 }
3930 #endif
3931 
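/*
 * STORE ON CONDITION.  insn->data selects the width: 0 = STOC (32-bit),
 * 1 = STOCG (64-bit), 2 = STOCFH (high word).  The condition from m3 is
 * tested first and we branch around the store when it is not fulfilled.
 */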
3932 static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
3933 {
3934     DisasCompare c;
3935     TCGv_i64 a, h;
3936     TCGLabel *lab;
3937     int r1;
3938 
3939     disas_jcc(s, &c, get_field(s, m3));
3940 
3941     /* We want to store when the condition is fulfilled, so branch
3942        out when it is not.  */
3943     c.cond = tcg_invert_cond(c.cond);
3944 
3945     lab = gen_new_label();
3946     if (c.is_64) {
3947         tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3948     } else {
3949         tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3950     }
3951 
3952     r1 = get_field(s, r1);
3953     a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
3954     switch (s->insn->data) {
3955     case 1: /* STOCG */
3956         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
3957         break;
3958     case 0: /* STOC */
3959         tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
3960         break;
3961     case 2: /* STOCFH */
3962         h = tcg_temp_new_i64();
3963         tcg_gen_shri_i64(h, regs[r1], 32);
3964         tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
3965         break;
3966     default:
3967         g_assert_not_reached();
3968     }
3969 
3970     gen_set_label(lab);
3971     return DISAS_NEXT;
3972 }
3973 
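/*
 * SHIFT LEFT SINGLE (arithmetic).  For the 32-bit forms (insn->data ==
 * 31) the source is pre-shifted into the high half so that the
 * CC_OP_SLA computation can use a single 64-bit implementation.
 */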
3974 static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
3975 {
3976     TCGv_i64 t;
3977     uint64_t sign = 1ull << s->insn->data;
3978     if (s->insn->data == 31) {
3979         t = tcg_temp_new_i64();
3980         tcg_gen_shli_i64(t, o->in1, 32);
3981     } else {
3982         t = o->in1;
3983     }
3984     gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
3985     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3986     /* The arithmetic left shift is curious in that it does not affect
3987        the sign bit.  Copy that over from the source unchanged.  */
3988     tcg_gen_andi_i64(o->out, o->out, ~sign);
3989     tcg_gen_andi_i64(o->in1, o->in1, sign);
3990     tcg_gen_or_i64(o->out, o->out, o->in1);
3991     return DISAS_NEXT;
3992 }
3993 
3994 static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
3995 {
3996     tcg_gen_shl_i64(o->out, o->in1, o->in2);
3997     return DISAS_NEXT;
3998 }
3999 
4000 static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
4001 {
4002     tcg_gen_sar_i64(o->out, o->in1, o->in2);
4003     return DISAS_NEXT;
4004 }
4005 
4006 static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
4007 {
4008     tcg_gen_shr_i64(o->out, o->in1, o->in2);
4009     return DISAS_NEXT;
4010 }
4011 
4012 static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
4013 {
4014     gen_helper_sfpc(cpu_env, o->in2);
4015     return DISAS_NEXT;
4016 }
4017 
4018 static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
4019 {
4020     gen_helper_sfas(cpu_env, o->in2);
4021     return DISAS_NEXT;
4022 }
4023 
4024 static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
4025 {
4026     /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
4027     tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
4028     gen_helper_srnm(cpu_env, o->addr1);
4029     return DISAS_NEXT;
4030 }
4031 
4032 static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
4033 {
4034     /* Bits 0-55 are ignored. */
4035     tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
4036     gen_helper_srnm(cpu_env, o->addr1);
4037     return DISAS_NEXT;
4038 }
4039 
4040 static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
4041 {
4042     TCGv_i64 tmp = tcg_temp_new_i64();
4043 
4044     /* Bits other than 61-63 are ignored. */
4045     tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);
4046 
4047     /* No need to call a helper; we don't implement DFP. */
4048     tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4049     tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
4050     tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
4051     return DISAS_NEXT;
4052 }
4053 
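/*
 * SET PROGRAM MASK.  Bits 2-3 of the low word of the first operand
 * become the condition code and bits 4-7 the PSW program mask.
 */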
4054 static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
4055 {
4056     tcg_gen_extrl_i64_i32(cc_op, o->in1);
4057     tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
4058     set_cc_static(s);
4059 
4060     tcg_gen_shri_i64(o->in1, o->in1, 24);
4061     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
4062     return DISAS_NEXT;
4063 }
4064 
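/*
 * EXTRACT CPU TIME.  All operands are fetched before anything is
 * modified: the third operand is loaded into r3, the CPU timer is
 * subtracted from the first operand into GR0, and the second operand
 * address is placed in GR1.
 */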
4065 static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
4066 {
4067     int b1 = get_field(s, b1);
4068     int d1 = get_field(s, d1);
4069     int b2 = get_field(s, b2);
4070     int d2 = get_field(s, d2);
4071     int r3 = get_field(s, r3);
4072     TCGv_i64 tmp = tcg_temp_new_i64();
4073 
4074     /* fetch all operands first */
4075     o->in1 = tcg_temp_new_i64();
4076     tcg_gen_addi_i64(o->in1, regs[b1], d1);
4077     o->in2 = tcg_temp_new_i64();
4078     tcg_gen_addi_i64(o->in2, regs[b2], d2);
4079     o->addr1 = tcg_temp_new_i64();
4080     gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
4081 
4082     /* load the third operand into r3 before modifying anything */
4083     tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
4084 
4085     /* subtract CPU timer from first operand and store in GR0 */
4086     gen_helper_stpt(tmp, cpu_env);
4087     tcg_gen_sub_i64(regs[0], o->in1, tmp);
4088 
4089     /* store second operand in GR1 */
4090     tcg_gen_mov_i64(regs[1], o->in2);
4091     return DISAS_NEXT;
4092 }
4093 
4094 #ifndef CONFIG_USER_ONLY
4095 static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
4096 {
4097     tcg_gen_shri_i64(o->in2, o->in2, 4);
4098     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
4099     return DISAS_NEXT;
4100 }
4101 
4102 static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
4103 {
4104     gen_helper_sske(cpu_env, o->in1, o->in2);
4105     return DISAS_NEXT;
4106 }
4107 
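/* Raise a specification exception if reserved bits are set in the PSW mask. */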
4108 static void gen_check_psw_mask(DisasContext *s)
4109 {
4110     TCGv_i64 reserved = tcg_temp_new_i64();
4111     TCGLabel *ok = gen_new_label();
4112 
4113     tcg_gen_andi_i64(reserved, psw_mask, PSW_MASK_RESERVED);
4114     tcg_gen_brcondi_i64(TCG_COND_EQ, reserved, 0, ok);
4115     gen_program_exception(s, PGM_SPECIFICATION);
4116     gen_set_label(ok);
4117 }
4118 
4119 static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
4120 {
4121     tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
4122 
4123     gen_check_psw_mask(s);
4124 
4125     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4126     s->exit_to_mainloop = true;
4127     return DISAS_TOO_MANY;
4128 }
4129 
4130 static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
4131 {
4132     tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
4133     return DISAS_NEXT;
4134 }
4135 #endif
4136 
4137 static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
4138 {
4139     gen_helper_stck(o->out, cpu_env);
4140     /* ??? We don't implement clock states.  */
4141     gen_op_movi_cc(s, 0);
4142     return DISAS_NEXT;
4143 }
4144 
4145 static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
4146 {
4147     TCGv_i64 c1 = tcg_temp_new_i64();
4148     TCGv_i64 c2 = tcg_temp_new_i64();
4149     TCGv_i64 todpr = tcg_temp_new_i64();
4150     gen_helper_stck(c1, cpu_env);
4151     /* 16-bit value stored in a uint32_t (only the valid bits are set) */
4152     tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
4153     /* Shift the 64-bit value into its place as a zero-extended
4154        104-bit value.  Note that "bit positions 64-103 are always
4155        non-zero so that they compare differently to STCK"; we set
4156        the least significant bit to 1.  */
4157     tcg_gen_shli_i64(c2, c1, 56);
4158     tcg_gen_shri_i64(c1, c1, 8);
4159     tcg_gen_ori_i64(c2, c2, 0x10000);
4160     tcg_gen_or_i64(c2, c2, todpr);
4161     tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
4162     tcg_gen_addi_i64(o->in2, o->in2, 8);
4163     tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
4164     /* ??? We don't implement clock states.  */
4165     gen_op_movi_cc(s, 0);
4166     return DISAS_NEXT;
4167 }
4168 
4169 #ifndef CONFIG_USER_ONLY
4170 static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
4171 {
4172     gen_helper_sck(cc_op, cpu_env, o->in2);
4173     set_cc_static(s);
4174     return DISAS_NEXT;
4175 }
4176 
4177 static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
4178 {
4179     gen_helper_sckc(cpu_env, o->in2);
4180     return DISAS_NEXT;
4181 }
4182 
4183 static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
4184 {
4185     gen_helper_sckpf(cpu_env, regs[0]);
4186     return DISAS_NEXT;
4187 }
4188 
4189 static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
4190 {
4191     gen_helper_stckc(o->out, cpu_env);
4192     return DISAS_NEXT;
4193 }
4194 
4195 static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
4196 {
4197     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4198     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4199 
4200     gen_helper_stctg(cpu_env, r1, o->in2, r3);
4201     return DISAS_NEXT;
4202 }
4203 
4204 static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
4205 {
4206     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4207     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4208 
4209     gen_helper_stctl(cpu_env, r1, o->in2, r3);
4210     return DISAS_NEXT;
4211 }
4212 
4213 static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
4214 {
4215     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
4216     return DISAS_NEXT;
4217 }
4218 
4219 static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
4220 {
4221     gen_helper_spt(cpu_env, o->in2);
4222     return DISAS_NEXT;
4223 }
4224 
4225 static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
4226 {
4227     gen_helper_stfl(cpu_env);
4228     return DISAS_NEXT;
4229 }
4230 
4231 static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
4232 {
4233     gen_helper_stpt(o->out, cpu_env);
4234     return DISAS_NEXT;
4235 }
4236 
4237 static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
4238 {
4239     gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
4240     set_cc_static(s);
4241     return DISAS_NEXT;
4242 }
4243 
4244 static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
4245 {
4246     gen_helper_spx(cpu_env, o->in2);
4247     return DISAS_NEXT;
4248 }
4249 
4250 static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
4251 {
4252     gen_helper_xsch(cpu_env, regs[1]);
4253     set_cc_static(s);
4254     return DISAS_NEXT;
4255 }
4256 
4257 static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
4258 {
4259     gen_helper_csch(cpu_env, regs[1]);
4260     set_cc_static(s);
4261     return DISAS_NEXT;
4262 }
4263 
4264 static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
4265 {
4266     gen_helper_hsch(cpu_env, regs[1]);
4267     set_cc_static(s);
4268     return DISAS_NEXT;
4269 }
4270 
4271 static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
4272 {
4273     gen_helper_msch(cpu_env, regs[1], o->in2);
4274     set_cc_static(s);
4275     return DISAS_NEXT;
4276 }
4277 
4278 static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
4279 {
4280     gen_helper_rchp(cpu_env, regs[1]);
4281     set_cc_static(s);
4282     return DISAS_NEXT;
4283 }
4284 
4285 static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
4286 {
4287     gen_helper_rsch(cpu_env, regs[1]);
4288     set_cc_static(s);
4289     return DISAS_NEXT;
4290 }
4291 
4292 static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
4293 {
4294     gen_helper_sal(cpu_env, regs[1]);
4295     return DISAS_NEXT;
4296 }
4297 
4298 static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
4299 {
4300     gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
4301     return DISAS_NEXT;
4302 }
4303 
4304 static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
4305 {
4306     /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
4307     gen_op_movi_cc(s, 3);
4308     return DISAS_NEXT;
4309 }
4310 
4311 static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
4312 {
4313     /* The instruction is suppressed if not provided. */
4314     return DISAS_NEXT;
4315 }
4316 
4317 static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
4318 {
4319     gen_helper_ssch(cpu_env, regs[1], o->in2);
4320     set_cc_static(s);
4321     return DISAS_NEXT;
4322 }
4323 
4324 static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
4325 {
4326     gen_helper_stsch(cpu_env, regs[1], o->in2);
4327     set_cc_static(s);
4328     return DISAS_NEXT;
4329 }
4330 
4331 static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
4332 {
4333     gen_helper_stcrw(cpu_env, o->in2);
4334     set_cc_static(s);
4335     return DISAS_NEXT;
4336 }
4337 
4338 static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
4339 {
4340     gen_helper_tpi(cc_op, cpu_env, o->addr1);
4341     set_cc_static(s);
4342     return DISAS_NEXT;
4343 }
4344 
4345 static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
4346 {
4347     gen_helper_tsch(cpu_env, regs[1], o->in2);
4348     set_cc_static(s);
4349     return DISAS_NEXT;
4350 }
4351 
4352 static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
4353 {
4354     gen_helper_chsc(cpu_env, o->in2);
4355     set_cc_static(s);
4356     return DISAS_NEXT;
4357 }
4358 
4359 static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
4360 {
4361     tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4362     tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4363     return DISAS_NEXT;
4364 }
4365 
4366 static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
4367 {
4368     uint64_t i2 = get_field(s, i2);
4369     TCGv_i64 t;
4370 
4371     /* It is important to do what the instruction name says: STORE THEN.
4372        If we let the output hook perform the store and we then fault and
4373        restart, we would have the wrong SYSTEM MASK in place.  */
4374     t = tcg_temp_new_i64();
4375     tcg_gen_shri_i64(t, psw_mask, 56);
4376     tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
4377 
4378     if (s->fields.op == 0xac) {
4379         tcg_gen_andi_i64(psw_mask, psw_mask,
4380                          (i2 << 56) | 0x00ffffffffffffffull);
4381     } else {
4382         tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4383     }
4384 
4385     gen_check_psw_mask(s);
4386 
4387     /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
4388     s->exit_to_mainloop = true;
4389     return DISAS_TOO_MANY;
4390 }
4391 
4392 static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
4393 {
4394     tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
4395 
4396     if (s->base.tb->flags & FLAG_MASK_PER) {
4397         update_psw_addr(s);
4398         gen_helper_per_store_real(cpu_env);
4399     }
4400     return DISAS_NEXT;
4401 }
4402 #endif
4403 
4404 static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
4405 {
4406     gen_helper_stfle(cc_op, cpu_env, o->in2);
4407     set_cc_static(s);
4408     return DISAS_NEXT;
4409 }
4410 
4411 static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
4412 {
4413     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
4414     return DISAS_NEXT;
4415 }
4416 
4417 static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
4418 {
4419     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
4420     return DISAS_NEXT;
4421 }
4422 
4423 static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
4424 {
4425     tcg_gen_qemu_st_tl(o->in1, o->in2, get_mem_index(s),
4426                        MO_TEUL | s->insn->data);
4427     return DISAS_NEXT;
4428 }
4429 
4430 static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
4431 {
4432     tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s),
4433                         MO_TEUQ | s->insn->data);
4434     return DISAS_NEXT;
4435 }
4436 
4437 static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
4438 {
4439     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4440     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4441 
4442     gen_helper_stam(cpu_env, r1, o->in2, r3);
4443     return DISAS_NEXT;
4444 }
4445 
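/*
 * STORE CHARACTERS UNDER MASK.  m3 selects which bytes of the source
 * word are stored.  Contiguous masks collapse into a single 8/16/32-bit
 * store; any other pattern falls back to one store per selected byte.
 */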
4446 static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
4447 {
4448     int m3 = get_field(s, m3);
4449     int pos, base = s->insn->data;
4450     TCGv_i64 tmp = tcg_temp_new_i64();
4451 
4452     pos = base + ctz32(m3) * 8;
4453     switch (m3) {
4454     case 0xf:
4455         /* Effectively a 32-bit store.  */
4456         tcg_gen_shri_i64(tmp, o->in1, pos);
4457         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
4458         break;
4459 
4460     case 0xc:
4461     case 0x6:
4462     case 0x3:
4463         /* Effectively a 16-bit store.  */
4464         tcg_gen_shri_i64(tmp, o->in1, pos);
4465         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
4466         break;
4467 
4468     case 0x8:
4469     case 0x4:
4470     case 0x2:
4471     case 0x1:
4472         /* Effectively an 8-bit store.  */
4473         tcg_gen_shri_i64(tmp, o->in1, pos);
4474         tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4475         break;
4476 
4477     default:
4478         /* This is going to be a sequence of shifts and stores.  */
4479         pos = base + 32 - 8;
4480         while (m3) {
4481             if (m3 & 0x8) {
4482                 tcg_gen_shri_i64(tmp, o->in1, pos);
4483                 tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
4484                 tcg_gen_addi_i64(o->in2, o->in2, 1);
4485             }
4486             m3 = (m3 << 1) & 0xf;
4487             pos -= 8;
4488         }
4489         break;
4490     }
4491     return DISAS_NEXT;
4492 }
4493 
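/*
 * STORE MULTIPLE.  insn->data is the element size (4 or 8); registers
 * r1 through r3 are stored to successive locations, with the register
 * number wrapping from 15 back to 0.
 */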
4494 static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
4495 {
4496     int r1 = get_field(s, r1);
4497     int r3 = get_field(s, r3);
4498     int size = s->insn->data;
4499     TCGv_i64 tsize = tcg_constant_i64(size);
4500 
4501     while (1) {
4502         tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
4503                             size == 8 ? MO_TEUQ : MO_TEUL);
4504         if (r1 == r3) {
4505             break;
4506         }
4507         tcg_gen_add_i64(o->in2, o->in2, tsize);
4508         r1 = (r1 + 1) & 15;
4509     }
4510 
4511     return DISAS_NEXT;
4512 }
4513 
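/* STORE MULTIPLE HIGH: as above, but storing the high word of each register. */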
4514 static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
4515 {
4516     int r1 = get_field(s, r1);
4517     int r3 = get_field(s, r3);
4518     TCGv_i64 t = tcg_temp_new_i64();
4519     TCGv_i64 t4 = tcg_constant_i64(4);
4520     TCGv_i64 t32 = tcg_constant_i64(32);
4521 
4522     while (1) {
4523         tcg_gen_shr_i64(t, regs[r1], t32);
4524         tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
4525         if (r1 == r3) {
4526             break;
4527         }
4528         tcg_gen_add_i64(o->in2, o->in2, t4);
4529         r1 = (r1 + 1) & 15;
4530     }
4531     return DISAS_NEXT;
4532 }
4533 
4534 static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
4535 {
4536     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
4537         gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4538     } else if (HAVE_ATOMIC128) {
4539         gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
4540     } else {
4541         gen_helper_exit_atomic(cpu_env);
4542         return DISAS_NORETURN;
4543     }
4544     return DISAS_NEXT;
4545 }
4546 
4547 static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
4548 {
4549     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4550     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4551 
4552     gen_helper_srst(cpu_env, r1, r2);
4553     set_cc_static(s);
4554     return DISAS_NEXT;
4555 }
4556 
4557 static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
4558 {
4559     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4560     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4561 
4562     gen_helper_srstu(cpu_env, r1, r2);
4563     set_cc_static(s);
4564     return DISAS_NEXT;
4565 }
4566 
4567 static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
4568 {
4569     tcg_gen_sub_i64(o->out, o->in1, o->in2);
4570     return DISAS_NEXT;
4571 }
4572 
4573 static DisasJumpType op_subu64(DisasContext *s, DisasOps *o)
4574 {
4575     tcg_gen_movi_i64(cc_src, 0);
4576     tcg_gen_sub2_i64(o->out, cc_src, o->in1, cc_src, o->in2, cc_src);
4577     return DISAS_NEXT;
4578 }
4579 
4580 /* Compute borrow (0, -1) into cc_src. */
4581 static void compute_borrow(DisasContext *s)
4582 {
4583     switch (s->cc_op) {
4584     case CC_OP_SUBU:
4585         /* The borrow value is already in cc_src (0,-1). */
4586         break;
4587     default:
4588         gen_op_calc_cc(s);
4589         /* fall through */
4590     case CC_OP_STATIC:
4591         /* The carry flag is the msb of CC; compute into cc_src. */
4592         tcg_gen_extu_i32_i64(cc_src, cc_op);
4593         tcg_gen_shri_i64(cc_src, cc_src, 1);
4594         /* fall through */
4595     case CC_OP_ADDU:
4596         /* Convert carry (1,0) to borrow (0,-1). */
4597         tcg_gen_subi_i64(cc_src, cc_src, 1);
4598         break;
4599     }
4600 }
4601 
4602 static DisasJumpType op_subb32(DisasContext *s, DisasOps *o)
4603 {
4604     compute_borrow(s);
4605 
4606     /* Borrow is {0, -1}, so add to subtract. */
4607     tcg_gen_add_i64(o->out, o->in1, cc_src);
4608     tcg_gen_sub_i64(o->out, o->out, o->in2);
4609     return DISAS_NEXT;
4610 }
4611 
4612 static DisasJumpType op_subb64(DisasContext *s, DisasOps *o)
4613 {
4614     compute_borrow(s);
4615 
4616     /*
4617      * Borrow is {0, -1}, so add to subtract; replicate the
4618      * borrow input to produce 128-bit -1 for the addition.
4619      */
4620     TCGv_i64 zero = tcg_constant_i64(0);
4621     tcg_gen_add2_i64(o->out, cc_src, o->in1, zero, cc_src, cc_src);
4622     tcg_gen_sub2_i64(o->out, cc_src, o->out, cc_src, o->in2, zero);
4623 
4624     return DISAS_NEXT;
4625 }
4626 
4627 static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
4628 {
4629     TCGv_i32 t;
4630 
4631     update_psw_addr(s);
4632     update_cc_op(s);
4633 
4634     t = tcg_constant_i32(get_field(s, i1) & 0xff);
4635     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4636 
4637     t = tcg_constant_i32(s->ilen);
4638     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4639 
4640     gen_exception(EXCP_SVC);
4641     return DISAS_NORETURN;
4642 }
4643 
4644 static DisasJumpType op_tam(DisasContext *s, DisasOps *o)
4645 {
4646     int cc = 0;
4647 
4648     cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0;
4649     cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0;
4650     gen_op_movi_cc(s, cc);
4651     return DISAS_NEXT;
4652 }
4653 
4654 static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
4655 {
4656     gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4657     set_cc_static(s);
4658     return DISAS_NEXT;
4659 }
4660 
4661 static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
4662 {
4663     gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4664     set_cc_static(s);
4665     return DISAS_NEXT;
4666 }
4667 
4668 static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
4669 {
4670     gen_helper_tcxb(cc_op, cpu_env, o->in1_128, o->in2);
4671     set_cc_static(s);
4672     return DISAS_NEXT;
4673 }
4674 
4675 #ifndef CONFIG_USER_ONLY
4676 
4677 static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
4678 {
4679     gen_helper_testblock(cc_op, cpu_env, o->in2);
4680     set_cc_static(s);
4681     return DISAS_NEXT;
4682 }
4683 
4684 static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
4685 {
4686     gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
4687     set_cc_static(s);
4688     return DISAS_NEXT;
4689 }
4690 
4691 #endif
4692 
4693 static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
4694 {
4695     TCGv_i32 l1 = tcg_constant_i32(get_field(s, l1) + 1);
4696 
4697     gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4698     set_cc_static(s);
4699     return DISAS_NEXT;
4700 }
4701 
4702 static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
4703 {
4704     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4705 
4706     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4707     set_cc_static(s);
4708     return DISAS_NEXT;
4709 }
4710 
4711 static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
4712 {
4713     TCGv_i128 pair = tcg_temp_new_i128();
4714 
4715     gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
4716     tcg_gen_extr_i128_i64(o->out2, o->out, pair);
4717     set_cc_static(s);
4718     return DISAS_NEXT;
4719 }
4720 
4721 static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
4722 {
4723     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4724 
4725     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4726     set_cc_static(s);
4727     return DISAS_NEXT;
4728 }
4729 
4730 static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
4731 {
4732     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4733 
4734     gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4735     set_cc_static(s);
4736     return DISAS_NEXT;
4737 }
4738 
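/*
 * Common code for the TRANSLATE extended-translation variants.  The low
 * opcode bits encode the source/destination element sizes.  Without the
 * ETF2-enhancement facility the m3 field is ignored; with it, m3 bit 3
 * (0x1) suppresses the test character, which otherwise comes from GR0
 * (one or two bytes, depending on the source size).
 */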
4739 static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
4740 {
4741     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4742     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4743     TCGv_i32 sizes = tcg_constant_i32(s->insn->opc & 3);
4744     TCGv_i32 tst = tcg_temp_new_i32();
4745     int m3 = get_field(s, m3);
4746 
4747     if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4748         m3 = 0;
4749     }
4750     if (m3 & 1) {
4751         tcg_gen_movi_i32(tst, -1);
4752     } else {
4753         tcg_gen_extrl_i64_i32(tst, regs[0]);
4754         if (s->insn->opc & 3) {
4755             tcg_gen_ext8u_i32(tst, tst);
4756         } else {
4757             tcg_gen_ext16u_i32(tst, tst);
4758         }
4759     }
4760     gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4761 
4762     set_cc_static(s);
4763     return DISAS_NEXT;
4764 }
4765 
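/*
 * TEST AND SET.  Atomically exchange 0xff into the byte and derive the
 * CC from the leftmost bit of the value that was there before.
 */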
4766 static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
4767 {
4768     TCGv_i32 t1 = tcg_constant_i32(0xff);
4769 
4770     tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4771     tcg_gen_extract_i32(cc_op, t1, 7, 1);
4772     set_cc_static(s);
4773     return DISAS_NEXT;
4774 }
4775 
4776 static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
4777 {
4778     TCGv_i32 l = tcg_constant_i32(get_field(s, l1));
4779 
4780     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4781     return DISAS_NEXT;
4782 }
4783 
4784 static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
4785 {
4786     int l1 = get_field(s, l1) + 1;
4787     TCGv_i32 l;
4788 
4789     /* The length must not exceed 32 bytes.  */
4790     if (l1 > 32) {
4791         gen_program_exception(s, PGM_SPECIFICATION);
4792         return DISAS_NORETURN;
4793     }
4794     l = tcg_constant_i32(l1);
4795     gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4796     set_cc_static(s);
4797     return DISAS_NEXT;
4798 }
4799 
4800 static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
4801 {
4802     int l1 = get_field(s, l1) + 1;
4803     TCGv_i32 l;
4804 
4805     /* The length must be even and must not exceed 64 bytes.  */
4806     if ((l1 & 1) || (l1 > 64)) {
4807         gen_program_exception(s, PGM_SPECIFICATION);
4808         return DISAS_NORETURN;
4809     }
4810     l = tcg_constant_i32(l1);
4811     gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4812     set_cc_static(s);
4813     return DISAS_NEXT;
4814 }
4815 
4816 
4817 static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
4818 {
4819     int d1 = get_field(s, d1);
4820     int d2 = get_field(s, d2);
4821     int b1 = get_field(s, b1);
4822     int b2 = get_field(s, b2);
4823     int l = get_field(s, l1);
4824     TCGv_i32 t32;
4825 
4826     o->addr1 = get_address(s, 0, b1, d1);
4827 
4828     /* If the addresses are identical, this is a store/memset of zero.  */
4829     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4830         o->in2 = tcg_constant_i64(0);
4831 
4832         l++;
4833         while (l >= 8) {
4834             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
4835             l -= 8;
4836             if (l > 0) {
4837                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4838             }
4839         }
4840         if (l >= 4) {
4841             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
4842             l -= 4;
4843             if (l > 0) {
4844                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4845             }
4846         }
4847         if (l >= 2) {
4848             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
4849             l -= 2;
4850             if (l > 0) {
4851                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4852             }
4853         }
4854         if (l) {
4855             tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
4856         }
4857         gen_op_movi_cc(s, 0);
4858         return DISAS_NEXT;
4859     }
4860 
4861     /* But in general we'll defer to a helper.  */
4862     o->in2 = get_address(s, 0, b2, d2);
4863     t32 = tcg_constant_i32(l);
4864     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4865     set_cc_static(s);
4866     return DISAS_NEXT;
4867 }
4868 
4869 static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
4870 {
4871     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4872     return DISAS_NEXT;
4873 }
4874 
4875 static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
4876 {
4877     int shift = s->insn->data & 0xff;
4878     int size = s->insn->data >> 8;
4879     uint64_t mask = ((1ull << size) - 1) << shift;
4880     TCGv_i64 t = tcg_temp_new_i64();
4881 
4882     tcg_gen_shli_i64(t, o->in2, shift);
4883     tcg_gen_xor_i64(o->out, o->in1, t);
4884 
4885     /* Produce the CC from only the bits manipulated.  */
4886     tcg_gen_andi_i64(cc_dst, o->out, mask);
4887     set_cc_nz_u64(s, cc_dst);
4888     return DISAS_NEXT;
4889 }
4890 
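/* XI, XIY: same interlocked/non-interlocked split as op_oi above. */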
4891 static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
4892 {
4893     o->in1 = tcg_temp_new_i64();
4894 
4895     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4896         tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
4897     } else {
4898         /* Perform the atomic operation in memory. */
4899         tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
4900                                      s->insn->data);
4901     }
4902 
4903     /* Also recompute for the atomic case, since the CC is set from the result. */
4904     tcg_gen_xor_i64(o->out, o->in1, o->in2);
4905 
4906     if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
4907         tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
4908     }
4909     return DISAS_NEXT;
4910 }
4911 
4912 static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
4913 {
4914     o->out = tcg_constant_i64(0);
4915     return DISAS_NEXT;
4916 }
4917 
4918 static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
4919 {
4920     o->out = tcg_constant_i64(0);
4921     o->out2 = o->out;
4922     return DISAS_NEXT;
4923 }
4924 
4925 #ifndef CONFIG_USER_ONLY
4926 static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
4927 {
4928     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4929 
4930     gen_helper_clp(cpu_env, r2);
4931     set_cc_static(s);
4932     return DISAS_NEXT;
4933 }
4934 
4935 static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
4936 {
4937     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4938     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4939 
4940     gen_helper_pcilg(cpu_env, r1, r2);
4941     set_cc_static(s);
4942     return DISAS_NEXT;
4943 }
4944 
4945 static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
4946 {
4947     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4948     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4949 
4950     gen_helper_pcistg(cpu_env, r1, r2);
4951     set_cc_static(s);
4952     return DISAS_NEXT;
4953 }
4954 
4955 static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
4956 {
4957     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4958     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4959 
4960     gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
4961     set_cc_static(s);
4962     return DISAS_NEXT;
4963 }
4964 
4965 static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
4966 {
4967     gen_helper_sic(cpu_env, o->in1, o->in2);
4968     return DISAS_NEXT;
4969 }
4970 
4971 static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
4972 {
4973     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4974     TCGv_i32 r2 = tcg_constant_i32(get_field(s, r2));
4975 
4976     gen_helper_rpcit(cpu_env, r1, r2);
4977     set_cc_static(s);
4978     return DISAS_NEXT;
4979 }
4980 
4981 static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
4982 {
4983     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4984     TCGv_i32 r3 = tcg_constant_i32(get_field(s, r3));
4985     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4986 
4987     gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
4988     set_cc_static(s);
4989     return DISAS_NEXT;
4990 }
4991 
4992 static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
4993 {
4994     TCGv_i32 r1 = tcg_constant_i32(get_field(s, r1));
4995     TCGv_i32 ar = tcg_constant_i32(get_field(s, b2));
4996 
4997     gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
4998     set_cc_static(s);
4999     return DISAS_NEXT;
5000 }
5001 #endif
5002 
5003 #include "translate_vx.c.inc"
5004 
5005 /* ====================================================================== */
5006 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
5007    the original inputs), update the various cc data structures in order to
5008    be able to compute the new condition code.  */
5009 
5010 static void cout_abs32(DisasContext *s, DisasOps *o)
5011 {
5012     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
5013 }
5014 
5015 static void cout_abs64(DisasContext *s, DisasOps *o)
5016 {
5017     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
5018 }
5019 
5020 static void cout_adds32(DisasContext *s, DisasOps *o)
5021 {
5022     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
5023 }
5024 
5025 static void cout_adds64(DisasContext *s, DisasOps *o)
5026 {
5027     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
5028 }
5029 
5030 static void cout_addu32(DisasContext *s, DisasOps *o)
5031 {
5032     tcg_gen_shri_i64(cc_src, o->out, 32);
5033     tcg_gen_ext32u_i64(cc_dst, o->out);
5034     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, cc_dst);
5035 }
5036 
5037 static void cout_addu64(DisasContext *s, DisasOps *o)
5038 {
5039     gen_op_update2_cc_i64(s, CC_OP_ADDU, cc_src, o->out);
5040 }
5041 
5042 static void cout_cmps32(DisasContext *s, DisasOps *o)
5043 {
5044     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
5045 }
5046 
5047 static void cout_cmps64(DisasContext *s, DisasOps *o)
5048 {
5049     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
5050 }
5051 
5052 static void cout_cmpu32(DisasContext *s, DisasOps *o)
5053 {
5054     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
5055 }
5056 
5057 static void cout_cmpu64(DisasContext *s, DisasOps *o)
5058 {
5059     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
5060 }
5061 
5062 static void cout_f32(DisasContext *s, DisasOps *o)
5063 {
5064     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
5065 }
5066 
5067 static void cout_f64(DisasContext *s, DisasOps *o)
5068 {
5069     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
5070 }
5071 
5072 static void cout_f128(DisasContext *s, DisasOps *o)
5073 {
5074     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
5075 }
5076 
5077 static void cout_nabs32(DisasContext *s, DisasOps *o)
5078 {
5079     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
5080 }
5081 
5082 static void cout_nabs64(DisasContext *s, DisasOps *o)
5083 {
5084     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
5085 }
5086 
5087 static void cout_neg32(DisasContext *s, DisasOps *o)
5088 {
5089     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
5090 }
5091 
5092 static void cout_neg64(DisasContext *s, DisasOps *o)
5093 {
5094     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
5095 }
5096 
5097 static void cout_nz32(DisasContext *s, DisasOps *o)
5098 {
5099     tcg_gen_ext32u_i64(cc_dst, o->out);
5100     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
5101 }
5102 
5103 static void cout_nz64(DisasContext *s, DisasOps *o)
5104 {
5105     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
5106 }
5107 
5108 static void cout_s32(DisasContext *s, DisasOps *o)
5109 {
5110     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
5111 }
5112 
5113 static void cout_s64(DisasContext *s, DisasOps *o)
5114 {
5115     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
5116 }
5117 
5118 static void cout_subs32(DisasContext *s, DisasOps *o)
5119 {
5120     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
5121 }
5122 
5123 static void cout_subs64(DisasContext *s, DisasOps *o)
5124 {
5125     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
5126 }
5127 
5128 static void cout_subu32(DisasContext *s, DisasOps *o)
5129 {
5130     tcg_gen_sari_i64(cc_src, o->out, 32);
5131     tcg_gen_ext32u_i64(cc_dst, o->out);
5132     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, cc_dst);
5133 }
5134 
5135 static void cout_subu64(DisasContext *s, DisasOps *o)
5136 {
5137     gen_op_update2_cc_i64(s, CC_OP_SUBU, cc_src, o->out);
5138 }
5139 
5140 static void cout_tm32(DisasContext *s, DisasOps *o)
5141 {
5142     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
5143 }
5144 
5145 static void cout_tm64(DisasContext *s, DisasOps *o)
5146 {
5147     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
5148 }
5149 
5150 static void cout_muls32(DisasContext *s, DisasOps *o)
5151 {
5152     gen_op_update1_cc_i64(s, CC_OP_MULS_32, o->out);
5153 }
5154 
5155 static void cout_muls64(DisasContext *s, DisasOps *o)
5156 {
5157     /* out contains "high" part, out2 contains "low" part of 128 bit result */
5158     gen_op_update2_cc_i64(s, CC_OP_MULS_64, o->out, o->out2);
5159 }
5160 
5161 /* ====================================================================== */
5162 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
5163    with the TCG register to which we will write.  Used in combination with
5164    the "wout" generators, in some cases we need a new temporary, and in
5165    some cases we can write to a TCG global.  */
5166 
5167 static void prep_new(DisasContext *s, DisasOps *o)
5168 {
5169     o->out = tcg_temp_new_i64();
5170 }
5171 #define SPEC_prep_new 0
5172 
5173 static void prep_new_P(DisasContext *s, DisasOps *o)
5174 {
5175     o->out = tcg_temp_new_i64();
5176     o->out2 = tcg_temp_new_i64();
5177 }
5178 #define SPEC_prep_new_P 0
5179 
5180 static void prep_new_x(DisasContext *s, DisasOps *o)
5181 {
5182     o->out_128 = tcg_temp_new_i128();
5183 }
5184 #define SPEC_prep_new_x 0
5185 
5186 static void prep_r1(DisasContext *s, DisasOps *o)
5187 {
5188     o->out = regs[get_field(s, r1)];
5189 }
5190 #define SPEC_prep_r1 0
5191 
5192 static void prep_r1_P(DisasContext *s, DisasOps *o)
5193 {
5194     int r1 = get_field(s, r1);
5195     o->out = regs[r1];
5196     o->out2 = regs[r1 + 1];
5197 }
5198 #define SPEC_prep_r1_P SPEC_r1_even
5199 
5200 static void prep_x1(DisasContext *s, DisasOps *o)
5201 {
5202     o->out_128 = load_freg_128(get_field(s, r1));
5203 }
5204 #define SPEC_prep_x1 SPEC_r1_f128
5205 
5206 /* ====================================================================== */
5207 /* The "Write OUTput" generators.  These generally perform some non-trivial
5208    copy of data to TCG globals, or to main memory.  The trivial cases are
5209    generally handled by having a "prep" generator install the TCG global
5210    as the destination of the operation.  */
5211 
5212 static void wout_r1(DisasContext *s, DisasOps *o)
5213 {
5214     store_reg(get_field(s, r1), o->out);
5215 }
5216 #define SPEC_wout_r1 0
5217 
5218 static void wout_out2_r1(DisasContext *s, DisasOps *o)
5219 {
5220     store_reg(get_field(s, r1), o->out2);
5221 }
5222 #define SPEC_wout_out2_r1 0
5223 
5224 static void wout_r1_8(DisasContext *s, DisasOps *o)
5225 {
5226     int r1 = get_field(s, r1);
5227     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
5228 }
5229 #define SPEC_wout_r1_8 0
5230 
5231 static void wout_r1_16(DisasContext *s, DisasOps *o)
5232 {
5233     int r1 = get_field(s, r1);
5234     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
5235 }
5236 #define SPEC_wout_r1_16 0
5237 
5238 static void wout_r1_32(DisasContext *s, DisasOps *o)
5239 {
5240     store_reg32_i64(get_field(s, r1), o->out);
5241 }
5242 #define SPEC_wout_r1_32 0
5243 
static void wout_r1_32h(DisasContext *s, DisasOps *o)
{
    store_reg32h_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_r1_32h 0

static void wout_r1_P32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

static void wout_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    TCGv_i64 t = tcg_temp_new_i64();
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(t, o->out, 32);
    store_reg32_i64(r1, t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r1_D64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    tcg_gen_extr_i128_i64(regs[r1 + 1], regs[r1], o->out_128);
}
#define SPEC_wout_r1_D64 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasOps *o)
{
    store_freg32_i64(get_field(s, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasOps *o)
{
    store_freg(get_field(s, r1), o->out);
}
#define SPEC_wout_f1 0

static void wout_x1(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);

    /* Split out_128 into out+out2 for cout_f128. */
    tcg_debug_assert(o->out == NULL);
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(o->out2, o->out, o->out_128);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1 SPEC_r1_f128

static void wout_x1_P(DisasContext *s, DisasOps *o)
{
    int f1 = get_field(s, r1);
    store_freg(f1, o->out);
    store_freg(f1 + 2, o->out2);
}
#define SPEC_wout_x1_P SPEC_r1_f128

static void wout_cond_r1r2_32(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_reg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
{
    if (get_field(s, r1) != get_field(s, r2)) {
        store_freg32_i64(get_field(s, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

static void wout_m1_8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

static void wout_m2_32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_wout_m2_32 0

static void wout_in2_r1(DisasContext *s, DisasOps *o)
{
    store_reg(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasOps *o)
{
    store_reg32_i64(get_field(s, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0

/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.  */
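/*
 * Illustrative example: an insn whose I1 column in insn-data.h.inc is
 * "r1" is wired up with in1_r1 below as its help_in1 hook via the E()
 * macro further down, loading general register r1 into o->in1.
 */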

static void in1_r1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1));
}
#define SPEC_in1_r1 0

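/*
 * Note: the *_o variants below alias regs[] directly rather than making
 * a copy, so they must only be used where the operand is not modified.
 */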
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1)];
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1)]);
}
#define SPEC_in1_r1_32u 0

static void in1_r1_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

static void in1_r1p1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r1) + 1];
}
#define SPEC_in1_r1p1_o SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

static void in1_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r2)]);
}
#define SPEC_in1_r2_32u 0

static void in1_r3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_reg(get_field(s, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasOps *o)
{
    o->in1 = regs[get_field(s, r3)];
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(s, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(s, r3)], 32);
}
#define SPEC_in1_r3_sr32 0

static void in1_e1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(s, r1));
}
#define SPEC_in1_e1 0

static void in1_f1(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r1));
}
#define SPEC_in1_f1 0

static void in1_x1(DisasContext *s, DisasOps *o)
{
    o->in1_128 = load_freg_128(get_field(s, r1));
}
#define SPEC_in1_x1 SPEC_r1_f128

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

static void in1_f3(DisasContext *s, DisasOps *o)
{
    o->in1 = load_freg(get_field(s, r3));
}
#define SPEC_in1_f3 0

static void in1_la1(DisasContext *s, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(s, b1), get_field(s, d1));
}
#define SPEC_in1_la1 0

static void in1_la2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in1_la2 0

static void in1_m1_8u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasOps *o)
{
    in1_la1(s, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in1_m1_64 0

/* ====================================================================== */
/* The "INput 2" generators.  These load the second operand to an insn.  */

static void in2_r1_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r1)];
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r1)]);
}
#define SPEC_in2_r1_32u 0

static void in2_r1_D32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasOps *o)
{
    o->in2 = regs[get_field(s, r2)];
}
#define SPEC_in2_r2_o 0

static void in2_r2_nz(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasOps *o)
{
    o->in2 = load_reg(get_field(s, r3));
}
#define SPEC_in2_r3 0

static void in2_r3_D64(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s, r3);
    o->in2_128 = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(o->in2_128, regs[r3 + 1], regs[r3]);
}
#define SPEC_in2_r3_D64 SPEC_r3_even

static void in2_r3_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r3_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r3)]);
}
#define SPEC_in2_r3_32u 0

static void in2_r2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(s, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(s, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(s, r2));
}
#define SPEC_in2_e2 0

static void in2_f2(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2));
}
#define SPEC_in2_f2 0

static void in2_x2(DisasContext *s, DisasOps *o)
{
    o->in2_128 = load_freg_128(get_field(s, r2));
}
#define SPEC_in2_x2 SPEC_r2_f128

/* Load the low double word of an extended (128-bit) format FP number */
static void in2_x2l(DisasContext *s, DisasOps *o)
{
    o->in2 = load_freg(get_field(s, r2) + 2);
}
#define SPEC_in2_x2l SPEC_r2_f128

static void in2_ra2(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s, r2);

    /* Note: *don't* treat !r2 as 0, use the reg value. */
    o->in2 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, o->in2, regs[r2], 0);
}
#define SPEC_in2_ra2 0

static void in2_a2(DisasContext *s, DisasOps *o)
{
    int x2 = have_field(s, x2) ? get_field(s, x2) : 0;
    o->in2 = get_address(s, x2, get_field(s, b2), get_field(s, d2));
}
#define SPEC_in2_a2 0

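/*
 * Relative-immediate operands are specified in halfwords, so when I2 is
 * an immediate the effective address computed below is pc_next + 2 * I2.
 */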
static TCGv gen_ri2(DisasContext *s)
{
    TCGv ri2 = NULL;
    bool is_imm;
    int imm;

    disas_jdest(s, i2, is_imm, imm, ri2);
    if (is_imm) {
        ri2 = tcg_constant_i64(s->base.pc_next + imm * 2);
    }

    return ri2;
}

static void in2_ri2(DisasContext *s, DisasOps *o)
{
    o->in2 = gen_ri2(s);
}
#define SPEC_in2_ri2 0

static void in2_sh(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s, b2);
    int d2 = get_field(s, d2);

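    /*
     * The shift count is specified like an address (B2 + D2), but only
     * the low 6 bits of the result are used.
     */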
    if (b2 == 0) {
        o->in2 = tcg_constant_i64(d2 & 0x3f);
    } else {
        o->in2 = get_address(s, 0, b2, d2);
        tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
    }
}
#define SPEC_in2_sh 0

static void in2_m2_8u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
}
#define SPEC_in2_m2_32u 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_32ua(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_m2_32ua 0
#endif

static void in2_m2_64(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
}
#define SPEC_in2_m2_64 0

static void in2_m2_64w(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
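    /* Wrap the loaded value like an address in the current addressing mode. */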
    gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
}
#define SPEC_in2_m2_64w 0

#ifndef CONFIG_USER_ONLY
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
    in2_a2(s, o);
    tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif

static void in2_mri2_16s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
}
#define SPEC_in2_mri2_16s 0

static void in2_mri2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
}
#define SPEC_in2_mri2_16u 0

static void in2_mri2_32s(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TESL | MO_ALIGN);
}
#define SPEC_in2_mri2_32s 0

static void in2_mri2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_tl(o->in2, gen_ri2(s), get_mem_index(s),
                       MO_TEUL | MO_ALIGN);
}
#define SPEC_in2_mri2_32u 0

static void in2_mri2_64(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s),
                        MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_mri2_64 0

static void in2_i2(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0

static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0

static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0

static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0

static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint16_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0

static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = (uint32_t)get_field(s, i2);
    o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0

#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
    o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif

/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-and-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
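/*
 * As a hypothetical illustration, a table line
 *     E(0x1234, FOO, ..., 0, 0)
 * expands three times below: first into an "insn_FOO" enumerator, then
 * into a DisasInsn initializer wiring up the in1/in2/prep/wout/cout/op
 * helpers, and finally into a "case 0x1234:" arm inside lookup_opc().
 */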

#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) {                   \
    .opc = OPC,                                                             \
    .flags = FL,                                                            \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
 },

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0

/* Give smaller names to the various facilities.  */
#define FAC_Z           S390_FEAT_ZARCH
#define FAC_CASS        S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP         S390_FEAT_DFP
#define FAC_DFPR        S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* DFP-rounding */
#define FAC_DO          S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE          S390_FEAT_EXECUTE_EXT
#define FAC_EI          S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE         S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR       S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE         S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA      S390_FEAT_HFP_MADDSUB
#define FAC_HW          S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM   S390_FEAT_FLOATING_POINT_SUPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE         S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT         S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC         S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2        S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD          S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC          S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF         S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE        S390_FEAT_STFLE
#define FAC_ILA         S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS       S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP         S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH     S390_FEAT_DAT_ENH
#define FAC_E2          S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH          S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA         S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB        S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3        S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA         S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3        S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4        S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5        S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_MSA8        S390_FEAT_MSA_EXT_8 /* msa-extension-8 facility */
#define FAC_ECT         S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI         S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS         S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V           S390_FEAT_VECTOR /* vector facility */
#define FAC_VE          S390_FEAT_VECTOR_ENH  /* vector enhancements facility 1 */
#define FAC_VE2         S390_FEAT_VECTOR_ENH2 /* vector enhancements facility 2 */
#define FAC_MIE2        S390_FEAT_MISC_INSTRUCTION_EXT2 /* miscellaneous-instruction-extensions facility 2 */
#define FAC_MIE3        S390_FEAT_MISC_INSTRUCTION_EXT3 /* miscellaneous-instruction-extensions facility 3 */

static const DisasInsn insn_info[] = {
#include "insn-data.h.inc"
};

#undef E
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.h.inc"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C

/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
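/*
 * For example, with the insn left-aligned, a 4-bit field beginning at
 * big-endian bit 8 is extracted by the generic computation below as
 * (insn << 8) >> 60.
 */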

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    if (f->size == 0) {
        return;
    }

    /* Extract the field from the insn, zero-extending it.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit. */
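        /*
         * The raw extract has DL in bits 19:8 and DH in bits 7:0;
         * reassemble it as the signed 20-bit value DH:DL.
         */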
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    case 3: /* MSB stored in RXB */
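        /*
         * Vector register numbers are 5 bits wide: the low 4 bits come
         * from the field itself and the most significant bit comes from
         * the matching RXB bit (instruction bits 36-39).
         */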
        g_assert(f->size == 4);
        switch (f->beg) {
        case 8:
            r |= extract64(insn, 63 - 36, 1) << 4;
            break;
        case 12:
            r |= extract64(insn, 63 - 37, 1) << 4;
            break;
        case 16:
            r |= extract64(insn, 63 - 38, 1) << 4;
            break;
        case 32:
            r |= extract64(insn, 63 - 39, 1) << 4;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        abort();
    }

    /*
     * Validate that the "compressed" encoding we selected above is valid.
     * I.e. we haven't made two different original fields overlap.
     */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}

/* Look up the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for an invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
{
    uint64_t insn, pc = s->base.pc_next;
    int op, op2, ilen;
    const DisasInsn *info;

    if (unlikely(s->ex_value)) {
        /* Drop the EX data now, so that it's clear on exception paths.  */
        tcg_gen_st_i64(tcg_constant_i64(0), cpu_env,
                       offsetof(CPUS390XState, ex_value));

        /* Extract the values saved by EXECUTE.  */
        insn = s->ex_value & 0xffffffffffff0000ull;
        ilen = s->ex_value & 0xf;

        /* Register insn bytes with translator so plugins work. */
        for (int i = 0; i < ilen; i++) {
            uint8_t byte = extract64(insn, 56 - (i * 8), 8);
            translator_fake_ldb(byte, pc + i);
        }
        op = insn >> 56;
    } else {
        insn = ld_code2(env, s, pc);
        op = (insn >> 8) & 0xff;
        ilen = get_ilen(op);
        switch (ilen) {
        case 2:
            insn = insn << 48;
            break;
        case 4:
            insn = ld_code4(env, s, pc) << 32;
            break;
        case 6:
            insn = (insn << 48) | (ld_code4(env, s, pc + 2) << 16);
            break;
        default:
            g_assert_not_reached();
        }
    }
    s->pc_tmp = s->base.pc_next + ilen;
    s->ilen = ilen;

    /* We can't determine the insn format until we've looked up the full
       insn opcode, which in turn requires locating the secondary opcode.
       Assume by default that OP2 is at bit 40; for those smaller insns
       that don't actually have a secondary opcode this will correctly
       result in OP2 = 0. */
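    /*
     * For example: for the 0xb2 family the second opcode is the whole
     * second insn byte, (insn << 8) >> 56, while for RI/RIL formats it
     * is only the low 4 bits of that byte, (insn << 12) >> 60.
     */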
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE, IE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        op2 = (insn << 12) >> 60;
        break;
    case 0xc5: /* MII */
    case 0xc7: /* SMI */
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(&s->fields, 0, sizeof(s->fields));
    s->fields.raw_insn = insn;
    s->fields.op = op;
    s->fields.op2 = op2;

    /* Look up the instruction.  */
    info = lookup_opc(op << 8 | op2);
    s->insn = info;

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(&s->fields, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}

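/*
 * Without the additional floating-point (AFP) facility, only the four
 * original fprs 0, 2, 4 and 6 are available; any other fpr number
 * requires AFP.
 */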
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

static bool is_fp_pair(int reg)
{
    /* 0,1,4,5,8,9,12,13: i.e. valid iff bit 1 of the register number is clear */
    return !(reg & 0x2);
}

static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasOps o = {};
    bool icount = false;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s);

    /* Update insn_start now that we know the ILEN.  */
    tcg_set_insn_start_param(s->insn_start, 2, s->ilen);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      s->fields.op, s->fields.op2);
        gen_illegal_opcode(s);
        ret = DISAS_NORETURN;
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            ret = DISAS_NORETURN;
            goto out;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(s, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(s, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(s, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* if vector instructions are not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!(s->base.tb->flags & FLAG_MASK_VECTOR)) {
                gen_data_exception(0xfe);
                ret = DISAS_NORETURN;
                goto out;
            }
        }

        /* input/output is the special case for icount mode */
        if (unlikely(insn->flags & IF_IO)) {
            icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
            if (icount) {
                gen_io_start();
            }
        }
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(s, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(s, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(s, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(s, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(s, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            ret = DISAS_NORETURN;
            goto out;
        }
    }

    /* Implement the instruction.  */
    if (insn->help_in1) {
        insn->help_in1(s, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* I/O should be the last instruction in the TB when icount is enabled */
    if (unlikely(icount && ret == DISAS_NEXT)) {
        ret = DISAS_TOO_MANY;
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered; save the PSW if not already done.  */
        if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception.  */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

out:
    /* Advance to the next instruction.  */
    s->base.pc_next = s->pc_tmp;
    return ret;
}

static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* 31-bit mode */
    if (!(dc->base.tb->flags & FLAG_MASK_64)) {
        dc->base.pc_first &= 0x7fffffff;
        dc->base.pc_next = dc->base.pc_first;
    }

    dc->cc_op = CC_OP_DYNAMIC;
    dc->ex_value = dc->base.tb->cs_base;
    dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value;
}

static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Delay setting ILEN until we've read the insn. */
    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0);
    dc->insn_start = tcg_last_op();
}

static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
                                uint64_t pc)
{
    uint64_t insn = cpu_lduw_code(env, pc);

    return pc + get_ilen((insn >> 8) & 0xff);
}

static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUS390XState *env = cs->env_ptr;
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = translate_one(env, dc);
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->ex_value ||
            !is_same_page(dcbase, dc->base.pc_next) ||
            !is_same_page(dcbase, get_next_pc(env, dc, dc->base.pc_next))) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}

static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (dc->exit_to_mainloop) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void s390x_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cs, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ex_value)) {
        /* ??? Unfortunately target_disas can't use host memory.  */
        fprintf(logfile, "IN: EXECUTE %016" PRIx64 "\n", dc->ex_value);
    } else {
        fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
        target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
    }
}

static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start           = s390x_tr_tb_start,
    .insn_start         = s390x_tr_insn_start,
    .translate_insn     = s390x_tr_translate_insn,
    .tb_stop            = s390x_tr_tb_stop,
    .disas_log          = s390x_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

void s390x_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    int cc_op = data[1];

    env->psw.addr = data[0];

    /* Update the CC op if it is not already up-to-date.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }

    /* Record ILEN.  */
    env->int_pgm_ilen = data[2];
}